code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
UpperCAmelCase_ : Tuple = sys.version_info >= (3, 10)
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : int=None , __magic_name__ : str=None ) -> Optional[int]:
"""simple docstring"""
return field(default_factory=lambda: default , metadata=lowerCAmelCase__ )
@dataclass
class _SCREAMING_SNAKE_CASE :
snake_case__ : int
snake_case__ : float
snake_case__ : str
snake_case__ : bool
@dataclass
class _SCREAMING_SNAKE_CASE :
snake_case__ : int = 4_2
snake_case__ : str = field(default="""toto""" , metadata={"""help""": """help message"""} )
@dataclass
class _SCREAMING_SNAKE_CASE :
snake_case__ : bool = False
snake_case__ : bool = True
snake_case__ : Optional[bool] = None
class _SCREAMING_SNAKE_CASE ( __lowercase ):
snake_case__ : Optional[int] = "titi"
snake_case__ : str = "toto"
class _SCREAMING_SNAKE_CASE ( __lowercase ):
snake_case__ : Dict = "titi"
snake_case__ : str = "toto"
snake_case__ : Dict = 4_2
@dataclass
class _SCREAMING_SNAKE_CASE :
snake_case__ : BasicEnum = "toto"
def _A ( self : Any ):
UpperCamelCase :int = BasicEnum(self.foo )
@dataclass
class _SCREAMING_SNAKE_CASE :
snake_case__ : MixedTypeEnum = "toto"
def _A ( self : int ):
UpperCamelCase :Optional[Any] = MixedTypeEnum(self.foo )
@dataclass
class _SCREAMING_SNAKE_CASE :
snake_case__ : Optional[int] = None
snake_case__ : Optional[float] = field(default=__lowercase , metadata={"""help""": """help message"""} )
snake_case__ : Optional[str] = None
snake_case__ : Optional[List[str]] = list_field(default=[] )
snake_case__ : Optional[List[int]] = list_field(default=[] )
@dataclass
class _SCREAMING_SNAKE_CASE :
snake_case__ : List[int] = list_field(default=[] )
snake_case__ : List[int] = list_field(default=[1, 2, 3] )
snake_case__ : List[str] = list_field(default=["""Hallo""", """Bonjour""", """Hello"""] )
snake_case__ : List[float] = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class _SCREAMING_SNAKE_CASE :
snake_case__ : List[int] = field()
snake_case__ : str = field()
snake_case__ : BasicEnum = field()
def _A ( self : Optional[Any] ):
UpperCamelCase :Tuple = BasicEnum(self.required_enum )
@dataclass
class _SCREAMING_SNAKE_CASE :
snake_case__ : int
snake_case__ : "BasicEnum" = field()
snake_case__ : "Optional[bool]" = None
snake_case__ : "str" = field(default="""toto""" , metadata={"""help""": """help message"""} )
snake_case__ : "List[str]" = list_field(default=["""Hallo""", """Bonjour""", """Hello"""] )
if is_python_no_less_than_3_10:
@dataclass
class _SCREAMING_SNAKE_CASE :
snake_case__ : bool = False
snake_case__ : bool = True
snake_case__ : bool | None = None
@dataclass
class _SCREAMING_SNAKE_CASE :
snake_case__ : int | None = None
snake_case__ : float | None = field(default=__lowercase , metadata={"""help""": """help message"""} )
snake_case__ : str | None = None
snake_case__ : list[str] | None = list_field(default=[] )
snake_case__ : list[int] | None = list_field(default=[] )
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def _A ( self : List[str] , __lowerCamelCase : argparse.ArgumentParser , __lowerCamelCase : argparse.ArgumentParser ):
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
UpperCamelCase :int = {k: v for k, v in vars(snake_case_ ).items() if k != '''container'''}
UpperCamelCase :str = {k: v for k, v in vars(snake_case_ ).items() if k != '''container'''}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get("""choices""" , snake_case_ ) and yy.get("""choices""" , snake_case_ ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx["""type"""](snake_case_ ) , yy["""type"""](snake_case_ ) )
del xx["type"], yy["type"]
self.assertEqual(snake_case_ , snake_case_ )
def _A ( self : Tuple ):
UpperCamelCase :Union[str, Any] = HfArgumentParser(snake_case_ )
UpperCamelCase :str = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=snake_case_ , required=snake_case_ )
expected.add_argument("""--bar""" , type=snake_case_ , required=snake_case_ )
expected.add_argument("""--baz""" , type=snake_case_ , required=snake_case_ )
expected.add_argument("""--flag""" , type=snake_case_ , default=snake_case_ , const=snake_case_ , nargs="""?""" )
self.argparsersEqual(snake_case_ , snake_case_ )
UpperCamelCase :Optional[Any] = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
(UpperCamelCase ) :str = parser.parse_args_into_dataclasses(snake_case_ , look_for_args_file=snake_case_ )
self.assertFalse(example.flag )
def _A ( self : Optional[int] ):
UpperCamelCase :Optional[int] = HfArgumentParser(snake_case_ )
UpperCamelCase :Dict = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=42 , type=snake_case_ )
expected.add_argument("""--baz""" , default="""toto""" , type=snake_case_ , help="""help message""" )
self.argparsersEqual(snake_case_ , snake_case_ )
def _A ( self : List[str] ):
UpperCamelCase :int = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=snake_case_ , default=snake_case_ , const=snake_case_ , nargs="""?""" )
expected.add_argument("""--baz""" , type=snake_case_ , default=snake_case_ , const=snake_case_ , nargs="""?""" )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("""--no_baz""" , action="""store_false""" , default=snake_case_ , dest="""baz""" )
expected.add_argument("""--opt""" , type=snake_case_ , default=snake_case_ )
UpperCamelCase :int = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(snake_case_ )
for dataclass_type in dataclass_types:
UpperCamelCase :Optional[Any] = HfArgumentParser(snake_case_ )
self.argparsersEqual(snake_case_ , snake_case_ )
UpperCamelCase :Tuple = parser.parse_args([] )
self.assertEqual(snake_case_ , Namespace(foo=snake_case_ , baz=snake_case_ , opt=snake_case_ ) )
UpperCamelCase :List[str] = parser.parse_args(["""--foo""", """--no_baz"""] )
self.assertEqual(snake_case_ , Namespace(foo=snake_case_ , baz=snake_case_ , opt=snake_case_ ) )
UpperCamelCase :str = parser.parse_args(["""--foo""", """--baz"""] )
self.assertEqual(snake_case_ , Namespace(foo=snake_case_ , baz=snake_case_ , opt=snake_case_ ) )
UpperCamelCase :Dict = parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""] )
self.assertEqual(snake_case_ , Namespace(foo=snake_case_ , baz=snake_case_ , opt=snake_case_ ) )
UpperCamelCase :Optional[int] = parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""] )
self.assertEqual(snake_case_ , Namespace(foo=snake_case_ , baz=snake_case_ , opt=snake_case_ ) )
def _A ( self : Optional[int] ):
UpperCamelCase :Union[str, Any] = HfArgumentParser(snake_case_ )
UpperCamelCase :Any = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=["""titi""", """toto""", 42] , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
self.argparsersEqual(snake_case_ , snake_case_ )
UpperCamelCase :str = parser.parse_args([] )
self.assertEqual(args.foo , """toto""" )
UpperCamelCase :Any = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
UpperCamelCase :List[Any] = parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo , """titi""" )
UpperCamelCase :Dict = parser.parse_args_into_dataclasses(["""--foo""", """titi"""] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
UpperCamelCase :Tuple = parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo , 42 )
UpperCamelCase :Optional[Any] = parser.parse_args_into_dataclasses(["""--foo""", """42"""] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def _A ( self : Optional[int] ):
@dataclass
class _SCREAMING_SNAKE_CASE :
snake_case__ : Literal["titi", "toto", 4_2] = "toto"
UpperCamelCase :int = HfArgumentParser(snake_case_ )
UpperCamelCase :List[str] = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=("""titi""", """toto""", 42) , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
self.argparsersEqual(snake_case_ , snake_case_ )
UpperCamelCase :Tuple = parser.parse_args([] )
self.assertEqual(args.foo , """toto""" )
UpperCamelCase :Dict = parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo , """titi""" )
UpperCamelCase :Tuple = parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo , 42 )
def _A ( self : Dict ):
UpperCamelCase :List[Any] = HfArgumentParser(snake_case_ )
UpperCamelCase :Tuple = argparse.ArgumentParser()
expected.add_argument("""--foo_int""" , nargs="""+""" , default=[] , type=snake_case_ )
expected.add_argument("""--bar_int""" , nargs="""+""" , default=[1, 2, 3] , type=snake_case_ )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=snake_case_ )
expected.add_argument("""--foo_float""" , nargs="""+""" , default=[0.1, 0.2, 0.3] , type=snake_case_ )
self.argparsersEqual(snake_case_ , snake_case_ )
UpperCamelCase :str = parser.parse_args([] )
self.assertEqual(
snake_case_ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["""Hallo""", """Bonjour""", """Hello"""] , foo_float=[0.1, 0.2, 0.3] ) , )
UpperCamelCase :int = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split() )
self.assertEqual(snake_case_ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["""a""", """b""", """c"""] , foo_float=[0.1, 0.7] ) )
def _A ( self : Dict ):
UpperCamelCase :Dict = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=snake_case_ , type=snake_case_ )
expected.add_argument("""--bar""" , default=snake_case_ , type=snake_case_ , help="""help message""" )
expected.add_argument("""--baz""" , default=snake_case_ , type=snake_case_ )
expected.add_argument("""--ces""" , nargs="""+""" , default=[] , type=snake_case_ )
expected.add_argument("""--des""" , nargs="""+""" , default=[] , type=snake_case_ )
UpperCamelCase :Optional[Any] = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(snake_case_ )
for dataclass_type in dataclass_types:
UpperCamelCase :List[Any] = HfArgumentParser(snake_case_ )
self.argparsersEqual(snake_case_ , snake_case_ )
UpperCamelCase :Dict = parser.parse_args([] )
self.assertEqual(snake_case_ , Namespace(foo=snake_case_ , bar=snake_case_ , baz=snake_case_ , ces=[] , des=[] ) )
UpperCamelCase :List[str] = parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split() )
self.assertEqual(snake_case_ , Namespace(foo=12 , bar=3.14 , baz="""42""" , ces=["""a""", """b""", """c"""] , des=[1, 2, 3] ) )
def _A ( self : int ):
UpperCamelCase :str = HfArgumentParser(snake_case_ )
UpperCamelCase :Union[str, Any] = argparse.ArgumentParser()
expected.add_argument("""--required_list""" , nargs="""+""" , type=snake_case_ , required=snake_case_ )
expected.add_argument("""--required_str""" , type=snake_case_ , required=snake_case_ )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=snake_case_ , )
self.argparsersEqual(snake_case_ , snake_case_ )
def _A ( self : Dict ):
UpperCamelCase :Tuple = HfArgumentParser(snake_case_ )
UpperCamelCase :Optional[Any] = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=snake_case_ , required=snake_case_ )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=snake_case_ , )
expected.add_argument("""--opt""" , type=snake_case_ , default=snake_case_ )
expected.add_argument("""--baz""" , default="""toto""" , type=snake_case_ , help="""help message""" )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=snake_case_ )
self.argparsersEqual(snake_case_ , snake_case_ )
def _A ( self : Dict ):
UpperCamelCase :int = HfArgumentParser(snake_case_ )
UpperCamelCase :Optional[int] = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
UpperCamelCase :Any = parser.parse_dict(snake_case_ )[0]
UpperCamelCase :Tuple = BasicExample(**snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
def _A ( self : int ):
UpperCamelCase :Tuple = HfArgumentParser(snake_case_ )
UpperCamelCase :Optional[int] = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
'''extra''': 42,
}
self.assertRaises(snake_case_ , parser.parse_dict , snake_case_ , allow_extra_keys=snake_case_ )
def _A ( self : List[Any] ):
UpperCamelCase :Union[str, Any] = HfArgumentParser(snake_case_ )
UpperCamelCase :Any = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase :Tuple = os.path.join(snake_case_ , """temp_json""" )
os.mkdir(snake_case_ )
with open(temp_local_path + """.json""" , """w+""" ) as f:
json.dump(snake_case_ , snake_case_ )
UpperCamelCase :Dict = parser.parse_yaml_file(Path(temp_local_path + """.json""" ) )[0]
UpperCamelCase :List[Any] = BasicExample(**snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
def _A ( self : int ):
UpperCamelCase :Any = HfArgumentParser(snake_case_ )
UpperCamelCase :Optional[int] = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase :Dict = os.path.join(snake_case_ , """temp_yaml""" )
os.mkdir(snake_case_ )
with open(temp_local_path + """.yaml""" , """w+""" ) as f:
yaml.dump(snake_case_ , snake_case_ )
UpperCamelCase :Optional[Any] = parser.parse_yaml_file(Path(temp_local_path + """.yaml""" ) )[0]
UpperCamelCase :Optional[int] = BasicExample(**snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
def _A ( self : List[Any] ):
UpperCamelCase :Optional[int] = HfArgumentParser(snake_case_ )
self.assertIsNotNone(snake_case_ )
| 38 |
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
lowercase__ ={
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
lowercase__ =logging.WARNING
def __UpperCamelCase ( ):
__a : Optional[Any] = os.getenv('''DATASETS_VERBOSITY''' , lowerCAmelCase__ )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
f"has to be one of: { ', '.join(log_levels.keys() ) }" )
return _default_log_level
def __UpperCamelCase ( ):
return __name__.split('''.''' )[0]
def __UpperCamelCase ( ):
return logging.getLogger(_get_library_name() )
def __UpperCamelCase ( ):
# Apply our default configuration to the library root logger.
__a : str = _get_library_root_logger()
library_root_logger.setLevel(_get_default_logging_level() )
def __UpperCamelCase ( ):
__a : Any = _get_library_root_logger()
library_root_logger.setLevel(logging.NOTSET )
def __UpperCamelCase ( lowerCAmelCase__ : Optional[str] = None ):
if name is None:
__a : Union[str, Any] = _get_library_name()
return logging.getLogger(lowerCAmelCase__ )
def __UpperCamelCase ( ):
return _get_library_root_logger().getEffectiveLevel()
def __UpperCamelCase ( lowerCAmelCase__ : int ):
_get_library_root_logger().setLevel(lowerCAmelCase__ )
def __UpperCamelCase ( ):
return set_verbosity(lowerCAmelCase__ )
def __UpperCamelCase ( ):
return set_verbosity(lowerCAmelCase__ )
def __UpperCamelCase ( ):
return set_verbosity(lowerCAmelCase__ )
def __UpperCamelCase ( ):
return set_verbosity(lowerCAmelCase__ )
def __UpperCamelCase ( ):
__a : Union[str, Any] = False
def __UpperCamelCase ( ):
__a : Tuple = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class UpperCamelCase__ :
def __init__(self : str , *snake_case_ : str , **snake_case_ : Union[str, Any] ): # pylint: disable=unused-argument
__a : Optional[Any] = args[0] if args else None
def __iter__(self : List[str] ):
return iter(self._iterator )
def __getattr__(self : str , snake_case_ : Optional[Any] ):
def empty_fn(*snake_case_ : int , **snake_case_ : int ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__(self : Union[str, Any] ):
return self
def __exit__(self : str , snake_case_ : List[str] , snake_case_ : int , snake_case_ : Optional[Any] ):
return
lowercase__ =True
class UpperCamelCase__ :
def __call__(self : Tuple , *snake_case_ : str , snake_case_ : str=False , **snake_case_ : Dict ):
if _tqdm_active and not disable:
return tqdm_lib.tqdm(*snake_case_ , **snake_case_ )
else:
return EmptyTqdm(*snake_case_ , **snake_case_ )
def lowerCAmelCase (self : Optional[Any] , *snake_case_ : Union[str, Any] , **snake_case_ : Optional[Any] ):
__a : List[Any] = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*snake_case_ , **snake_case_ )
def lowerCAmelCase (self : str ):
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
lowercase__ =_tqdm_cls()
def __UpperCamelCase ( ):
global _tqdm_active
return bool(_tqdm_active )
def __UpperCamelCase ( ):
global _tqdm_active
__a : Dict = True
def __UpperCamelCase ( ):
global _tqdm_active
__a : Union[str, Any] = False
| 216 | 0 |
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
__A = logging.get_logger(__name__)
def __a ( lowerCAmelCase_ : str ,lowerCAmelCase_ : int ) -> List[Any]:
'''simple docstring'''
def run_func(lowerCAmelCase_ : Any ):
@wraps(__a )
def run_in_eager_mode(*lowerCAmelCase_ : Optional[int] ,**lowerCAmelCase_ : Tuple ):
return func(*__a ,**__a )
@wraps(__a )
@tf.function(experimental_compile=__a )
def run_in_graph_mode(*lowerCAmelCase_ : Tuple ,**lowerCAmelCase_ : Optional[int] ):
return func(*__a ,**__a )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"""Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def __a ( lowerCAmelCase_ : Dict ,lowerCAmelCase_ : int ,lowerCAmelCase_ : Any ) -> Dict:
'''simple docstring'''
UpperCAmelCase_= random.Random()
UpperCAmelCase_= [rng.randint(0 ,vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(__a ,shape=(batch_size, sequence_length) ,dtype=tf.intaa )
class lowercase ( snake_case_):
"""simple docstring"""
a__ : TensorFlowBenchmarkArguments
a__ : PretrainedConfig
a__ : str = "TensorFlow"
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
return tf.__version__
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : int ) -> float:
UpperCAmelCase_= self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
UpperCAmelCase_= self._prepare_inference_func(_A , _A , _A )
return self._measure_speed(_inference )
def _SCREAMING_SNAKE_CASE ( self : Tuple , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : int ) -> float:
UpperCAmelCase_= self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
UpperCAmelCase_= self._prepare_train_func(_A , _A , _A )
return self._measure_speed(_train )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : int ) -> [Memory, Optional[MemorySummary]]:
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , _A )
UpperCAmelCase_= self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
UpperCAmelCase_= self._prepare_inference_func(_A , _A , _A )
return self._measure_memory(_inference )
def _SCREAMING_SNAKE_CASE ( self : Dict , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : int ) -> [Memory, Optional[MemorySummary]]:
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , _A )
UpperCAmelCase_= self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
UpperCAmelCase_= self._prepare_train_func(_A , _A , _A )
return self._measure_memory(_train )
def _SCREAMING_SNAKE_CASE ( self : str , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : int ) -> Callable[[], None]:
UpperCAmelCase_= self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
UpperCAmelCase_= (
hasattr(_A , """architectures""" )
and isinstance(config.architectures , _A )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCAmelCase_= 'TF' + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCAmelCase_= __import__("""transformers""" , fromlist=[model_class] )
UpperCAmelCase_= getattr(_A , _A )
UpperCAmelCase_= model_cls(_A )
except ImportError:
raise ImportError(
F"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
UpperCAmelCase_= TF_MODEL_MAPPING[config.__class__](_A )
# encoder-decoder has vocab size saved differently
UpperCAmelCase_= config.vocab_size if hasattr(_A , """vocab_size""" ) else config.encoder.vocab_size
UpperCAmelCase_= random_input_ids(_A , _A , _A )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(_A , decoder_input_ids=_A , training=_A )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(_A , training=_A )
UpperCAmelCase_= encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def _SCREAMING_SNAKE_CASE ( self : Tuple , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : int ) -> Callable[[], None]:
UpperCAmelCase_= self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" )
if self.args.fpaa:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
UpperCAmelCase_= (
hasattr(_A , """architectures""" )
and isinstance(config.architectures , _A )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCAmelCase_= 'TF' + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCAmelCase_= __import__("""transformers""" , fromlist=[model_class] )
UpperCAmelCase_= getattr(_A , _A )
UpperCAmelCase_= model_cls(_A )
except ImportError:
raise ImportError(
F"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
UpperCAmelCase_= TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](_A )
# encoder-decoder has vocab size saved differently
UpperCAmelCase_= config.vocab_size if hasattr(_A , """vocab_size""" ) else config.encoder.vocab_size
UpperCAmelCase_= random_input_ids(_A , _A , _A )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
UpperCAmelCase_= model(_A , decoder_input_ids=_A , labels=_A , training=_A )[0]
UpperCAmelCase_= tf.gradients(_A , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
UpperCAmelCase_= model(_A , labels=_A , training=_A )[0]
UpperCAmelCase_= tf.gradients(_A , model.trainable_variables )
return gradients
UpperCAmelCase_= encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def _SCREAMING_SNAKE_CASE ( self : int , __UpperCAmelCase : Union[str, Any] ) -> float:
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 10 times to stabilize compilation for tpu
logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" )
timeit.repeat(_A , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
UpperCAmelCase_= timeit.repeat(
_A , repeat=self.args.repeat , number=10 , )
return min(_A ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(F"""Doesn't fit on GPU. {e}""" )
def _SCREAMING_SNAKE_CASE ( self : Dict , __UpperCAmelCase : Callable[[], None] ) -> [Memory, MemorySummary]:
logger.info(
"""Note that TensorFlow allocates more memory than """
"""it might need to speed up computation. """
"""The memory reported here corresponds to the memory """
"""reported by `nvidia-smi`, which can vary depending """
"""on total available memory on the GPU that is used.""" )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"""`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"""
""" consumption line by line.""" )
UpperCAmelCase_= start_memory_tracing("""transformers""" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"""Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"""
""" with `args.memory=False`""" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"""py3nvml not installed, we won\'t log GPU memory usage. """
"""Install py3nvml (pip install py3nvml) to log information about GPU.""" )
UpperCAmelCase_= 'N/A'
else:
logger.info(
"""Measuring total GPU usage on GPU device. Make sure to not have additional processes"""
""" running on the same GPU.""" )
# init nvml
nvml.nvmlInit()
func()
UpperCAmelCase_= nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
UpperCAmelCase_= nvml.nvmlDeviceGetMemoryInfo(_A )
UpperCAmelCase_= meminfo.used
UpperCAmelCase_= Memory(_A )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"""When enabling line by line tracing, the max peak memory for CPU is inaccurate in"""
""" TensorFlow.""" )
UpperCAmelCase_= None
else:
UpperCAmelCase_= measure_peak_memory_cpu(_A )
UpperCAmelCase_= Memory(_A ) if isinstance(_A , _A ) else memory_bytes
if self.args.trace_memory_line_by_line:
UpperCAmelCase_= stop_memory_tracing(_A )
if memory is None:
UpperCAmelCase_= summary.total
else:
UpperCAmelCase_= None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F"""Doesn't fit on GPU. {e}""" )
return "N/A", None
| 350 |
import pytest
import datasets
# Import fixture modules as plugins
__A = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec''']
def __a ( lowerCAmelCase_ : Optional[Any] ,lowerCAmelCase_ : Any ) -> Tuple:
'''simple docstring'''
for item in items:
if any(marker in item.keywords for marker in ["""integration""", """unit"""] ):
continue
item.add_marker(pytest.mark.unit )
def __a ( lowerCAmelCase_ : Tuple ) -> Optional[Any]:
'''simple docstring'''
config.addinivalue_line("""markers""" ,"""torchaudio_latest: mark test to run with torchaudio>=0.12""" )
@pytest.fixture(autouse=lowerCAmelCase_ )
def __a ( lowerCAmelCase_ : List[str] ,lowerCAmelCase_ : List[str] ) -> int:
'''simple docstring'''
UpperCAmelCase_= tmp_path_factory.getbasetemp() / """cache"""
UpperCAmelCase_= test_hf_cache_home / """datasets"""
UpperCAmelCase_= test_hf_cache_home / """metrics"""
UpperCAmelCase_= test_hf_cache_home / """modules"""
monkeypatch.setattr("""datasets.config.HF_DATASETS_CACHE""" ,str(lowerCAmelCase_ ) )
monkeypatch.setattr("""datasets.config.HF_METRICS_CACHE""" ,str(lowerCAmelCase_ ) )
monkeypatch.setattr("""datasets.config.HF_MODULES_CACHE""" ,str(lowerCAmelCase_ ) )
UpperCAmelCase_= test_hf_datasets_cache / """downloads"""
monkeypatch.setattr("""datasets.config.DOWNLOADED_DATASETS_PATH""" ,str(lowerCAmelCase_ ) )
UpperCAmelCase_= test_hf_datasets_cache / """downloads""" / """extracted"""
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" ,str(lowerCAmelCase_ ) )
@pytest.fixture(autouse=lowerCAmelCase_ ,scope="""session""" )
def __a ( ) -> Optional[int]:
'''simple docstring'''
datasets.disable_progress_bar()
@pytest.fixture(autouse=lowerCAmelCase_ )
def __a ( lowerCAmelCase_ : int ) -> str:
'''simple docstring'''
monkeypatch.setattr("""datasets.config.HF_UPDATE_DOWNLOAD_COUNTS""" ,lowerCAmelCase_ )
@pytest.fixture
def __a ( lowerCAmelCase_ : List[str] ) -> Union[str, Any]:
'''simple docstring'''
monkeypatch.setattr("""sqlalchemy.util.deprecations.SILENCE_UBER_WARNING""" ,lowerCAmelCase_ )
| 277 | 0 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)

# Base import structure for the lazily-loaded BLIP subpackage.  Each optional
# backend below ADDS its own key, so a missing backend only hides its own
# symbols.  The original bound everything to one name (`_snake_case`) so the
# lists overwrote each other and `_LazyModule` received an undefined
# `_import_structure`.
_import_structure = {
    "configuration_blip": [
        "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlipConfig",
        "BlipTextConfig",
        "BlipVisionConfig",
    ],
    "processing_blip": ["BlipProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip"] = [
        "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlipModel",
        "BlipPreTrainedModel",
        "BlipForConditionalGeneration",
        "BlipForQuestionAnswering",
        "BlipVisionModel",
        "BlipTextModel",
        "BlipForImageTextRetrieval",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blip"] = [
        "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBlipModel",
        "TFBlipPreTrainedModel",
        "TFBlipForConditionalGeneration",
        "TFBlipForQuestionAnswering",
        "TFBlipVisionModel",
        "TFBlipTextModel",
        "TFBlipForImageTextRetrieval",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports ...
    from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
    from .processing_blip import BlipProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_blip import BlipImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip import (
            BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipForConditionalGeneration,
            BlipForImageTextRetrieval,
            BlipForQuestionAnswering,
            BlipModel,
            BlipPreTrainedModel,
            BlipTextModel,
            BlipVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blip import (
            TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBlipForConditionalGeneration,
            TFBlipForImageTextRetrieval,
            TFBlipForQuestionAnswering,
            TFBlipModel,
            TFBlipPreTrainedModel,
            TFBlipTextModel,
            TFBlipVisionModel,
        )
else:
    # ... while at runtime the module is replaced with a lazy proxy.  The
    # original assigned the proxy to a throwaway name instead of installing
    # it in sys.modules, leaving the eager (broken) module in place.
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26 |
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

# Module logger.  The original bound both the logger and the archive map to
# the same name, so the logger was immediately shadowed by the dict below.
logger = logging.get_logger(__name__)

# Released X-MOD checkpoints -> hosted config.json locations.
XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
    "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
    "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
    "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
    "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
    "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
    "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
    "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
    "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class lowercase ( UpperCamelCase__ ):
    """Configuration class for X-MOD (XLM-R with language-specific adapters).

    NOTE(review): the original signature reused one parameter name for every
    argument (a SyntaxError) and bound each value to a throwaway local
    instead of ``self``; parameter names below are restored from the
    attributes the body assigns.  Class name kept for external callers.
    """

    # Model type identifier consumed by PretrainedConfig machinery
    # (the original bound the string to a meaningless `_a` attribute).
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # Adapter (X-MOD specific) settings.
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        # Copy so callers passing a tuple get a mutable list attribute.
        self.languages = list(languages)
        self.default_language = default_language
class lowercase ( UpperCamelCase__ ):
    """ONNX export configuration: declares the dynamic axes of the model inputs.

    NOTE(review): this class shadows the config class of the same name above;
    presumably they had distinct names before obfuscation.  Name kept to
    avoid breaking unknown external callers.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis mapping for each ONNX input tensor.

        Renamed from the garbled ``a__``: ``OnnxConfig`` consumers read the
        ``inputs`` property.  The original also assigned the axis dict to a
        throwaway local while returning an undefined ``dynamic_axis``.
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 26 | 1 |
def solution(limit: int = 1_000_000) -> int:
    """Project Euler 72: count reduced proper fractions with denominator <= limit.

    The answer is sum(phi(d) for d in 2..limit).  phi is computed with an
    exact integer sieve: the original used float products (rounding risk)
    and referenced undefined names (``primes``) with a garbled sieve step.
    """
    # phi[n] starts at n; for each prime p dividing n, multiply by (1 - 1/p)
    # exactly via phi[n] -= phi[n] // p.
    phi = list(range(limit + 1))
    for p in range(2, limit + 1):
        if phi[p] == p:  # p is prime: no smaller prime has reduced phi[p]
            for multiple in range(p, limit + 1, p):
                phi[multiple] -= phi[multiple] // p
    return sum(phi[2:])
if __name__ == "__main__":
    # Print the answer for the default limit of 1,000,000.
    print(F'''{solution() = }''')
| 353 |
import argparse

import fairseq
import torch

from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging

logging.set_verbosity_info()
# The original bound the logger, MAPPING and TOP_LEVEL_KEYS to one shadowing
# name; the conversion functions below reference them by these real names.
logger = logging.get_logger(__name__)

# fairseq parameter-name prefixes -> HF module paths ("*" = layer index).
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "encoder.layer_norm_for_extract": "layer_norm_for_extract",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "label_embs_concat": "label_embeddings_concat",
    "mask_emb": "masked_spec_embed",
    "spk_proj": "speaker_proj",
}

# HF attributes that live at the top level of the model, i.e. are NOT nested
# under the "unispeech_sat." prefix.
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
    "label_embeddings_concat",
    "speaker_proj",
    "layer_norm_for_extract",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy ``value`` into the attribute of ``hf_pointer`` addressed by dotted ``key``.

    ``weight_type`` selects the tensor slot (weight / weight_g / weight_v /
    bias, or the object itself for ``None``); ``full_name`` is only used in
    error and log messages.  The original signature reused one parameter name
    five times and the copies landed in throwaway locals.
    """
    # Walk the dotted attribute path down to the target submodule/parameter.
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    # Shape check before assignment so mismatches fail loudly.
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    """Port every tensor of a fairseq UniSpeechSAT checkpoint into ``hf_model``.

    Conv feature-extractor tensors go through ``load_conv_layer``; everything
    else is translated via the module-level ``MAPPING`` table and written with
    ``set_recursively``.  Unmatched tensors are collected and reported.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        # Splice the encoder layer index into the mapped path.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq ``conv_layers.*`` tensor into the HF feature extractor.

    ``type_id`` 0 addresses the conv weight/bias; ``type_id`` 2 the layer
    norm (for layer 0 only when group norm is used).  Anything else is
    appended to ``unused_weights``.  The original reused one parameter name
    five times and wrote the tensors into throwaway locals.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Convert a fairseq UniSpeechSAT checkpoint into a saved HF model.

    Parameter order matches the CLI call at the bottom of this file:
    (checkpoint, dump dir, optional config, optional dict, finetuned flag).
    """
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    # NOTE(review): mirrors the original, which unconditionally blanked the
    # dict path before deriving the fairseq "data" override from it.
    dict_path = ""

    if is_finetuned:
        hf_wavavec = UniSpeechSatForCTC(config)
    else:
        hf_wavavec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wavavec)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point.  The original bound the parser and the parsed args to
    # a throwaway name while reading them back as `parser`/`args`.
    parser = argparse.ArgumentParser()
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
    parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    parser.add_argument(
        '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
    )
    args = parser.parse_args()
    convert_unispeech_sat_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 216 | 0 |
"""simple docstring"""
import math
def fx(x: float, a: float) -> float:
    """f(x) = x^2 - a: the function whose positive root is sqrt(a).

    The original declared both parameters with the same name (a SyntaxError)
    while its body read a free variable ``a``.
    """
    return math.pow(x, 2) - a
def fx_derivative(x: float) -> float:
    """Derivative of f(x) = x^2 - a with respect to x (named so the Newton
    loop below, which calls ``fx_derivative``, can resolve it)."""
    return 2 * x
def get_initial_point(a: float) -> float:
    """Return a starting guess >= sqrt(a) by repeated squaring from 2.

    The original assigned each square to a throwaway local and never updated
    the loop variable, so the loop either crashed or never terminated.
    """
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start
def square_root_iterative(a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001) -> float:
    """Approximate sqrt(a) with Newton-Raphson iteration on f(x) = x^2 - a.

    Args:
        a: non-negative number whose square root is wanted.
        max_iter: iteration cap before returning the current estimate.
        tolerance: stop once successive estimates differ by less than this.

    Raises:
        ValueError: if ``a`` is negative (no real square root).
    """
    if a < 0:
        raise ValueError("math domain error")

    value = get_initial_point(a)
    for _ in range(max_iter):
        prev_value = value
        # Standard Newton step: x <- x - f(x) / f'(x).
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value
    return value
if __name__ == "__main__":
    # Run any doctests embedded in this module when executed directly.
    from doctest import testmod
    testmod()
| 246 |
"""Fast T5 tokenizer support: vocab file names, checkpoint URL maps and the
legacy (to-be-removed) positional-embedding size table."""
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging

# The constants below are restored to the names the tokenizer class in this
# file actually references; the original bound all of them (and the slow
# tokenizer fallback) to one shadowing name.
if is_sentencepiece_available():
    from .tokenization_ta import TaTokenizer
else:
    # Without sentencepiece the slow tokenizer cannot be constructed.
    TaTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json",
    },
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}
class _UpperCAmelCase ( __a):
    """Fast (Rust-backed) T5 tokenizer.

    NOTE(review): in the original every class attribute was declared as
    ``__a`` and every method as ``__snake_case``, so each definition silently
    shadowed the previous one, and ``__init__`` bound its state to throwaway
    locals.  The attribute and method names below are the ones the method
    bodies and the ``PreTrainedTokenizerFast`` machinery actually rely on;
    the class name itself is left untouched for external callers.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = TaTokenizer

    # Tokens automatically prepended to every sequence (none for T5).
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        **kwargs,
    ) -> None:
        # Auto-register the <extra_id_N> sentinel tokens, or validate a
        # user-supplied list against ``extra_ids``.
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id_" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        # Saving back to a slow tokenizer needs the original sentencepiece file.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        """Preserve (and warn about) the legacy hard-coded max model length."""
        if pretrained_model_name_or_path in _UpperCAmelCase.max_model_input_sizes:
            deprecated_max_model_length = _UpperCAmelCase.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )
        return max_model_length

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece vocab file into ``save_directory``."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"Copy vocab file to {out_vocab_file}")
        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Append </s> to each sequence: ``X</s>`` or ``A</s>B</s>``."""
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """T5 does not use token type ids: return zeros of the padded length."""
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def get_sentinel_tokens(self) -> List[str]:
        """Return the registered ``<extra_id_N>`` sentinel tokens."""
        # The original filtered on ``bool(...) is not None``, which is always
        # True and therefore kept every additional special token.
        return list(
            set(filter(lambda token: bool(re.search(r"<extra_id_\d+>", token)), self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self) -> List[int]:
        """Return the vocabulary ids of the sentinel tokens."""
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
| 246 | 1 |
"""Image processor for LayoutLM-style document models: resizing, rescaling,
normalization, and optional Tesseract OCR for words and bounding boxes."""
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
    import PIL
# soft dependency
if is_pytesseract_available():
    import pytesseract
# Module logger (name garbled by obfuscation; not referenced elsewhere here).
_SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
def normalize_box(box, width, height):
    """Scale an absolute (x0, y0, x1, y1) box into LayoutLM's 0-1000 space.

    ``width``/``height`` are the source image dimensions.  Renamed from the
    garbled ``UpperCamelCase_`` (the OCR helper below calls ``normalize_box``)
    and given distinct parameter names (the original reused one name).
    """
    return [
        int(1_0_0_0 * (box[0] / width)),
        int(1_0_0_0 * (box[1] / height)),
        int(1_0_0_0 * (box[2] / width)),
        int(1_0_0_0 * (box[3] / height)),
    ]
def apply_tesseract(image, lang: Optional[str], tesseract_config: Optional[str]):
    """Run Tesseract OCR on ``image`` and return ``(words, normalized_boxes)``.

    Empty words (and their coordinates) are dropped; boxes are converted to
    (left, top, right, bottom) and normalized to the 0-1000 LayoutLM space.
    """
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates (set: O(1) membership)
    irrelevant_indices = {idx for idx, word in enumerate(words) if not word.strip()}
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_boxes.append([x, y, x + w, y + h])

    # finally, normalize the bounding boxes
    normalized_boxes = [normalize_box(box, image_width, image_height) for box in actual_boxes]

    # Explicit raise instead of the original ``assert`` (stripped under -O).
    if len(words) != len(normalized_boxes):
        raise ValueError("Not as many words as there are bounding boxes")

    return words, normalized_boxes
class _snake_case ( lowercase_ ):
    """LayoutLM-family image processor.

    Optionally runs Tesseract OCR to extract words and normalized (0-1000)
    bounding boxes, then applies resize -> rescale -> normalize and packs
    everything into a ``BatchFeature``.

    NOTE(review): in the original every ``__init__`` assignment bound a
    throwaway local instead of ``self.*`` and all four methods shared one
    name; restored the attribute/method names that ``preprocess`` itself
    reads (``self.do_resize``, ``self.resize``, ...).  Class name kept.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        do_rescale=True,
        rescale_value=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        apply_ocr=True,
        ocr_lang=None,
        tesseract_config="",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Default to the canonical 224x224 input resolution.
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs):
        """Resize ``image`` to ``size['height'] x size['width']``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        # ``resize`` here resolves to the module-level transform, not this method.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Multiply pixel values by ``scale`` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        """Normalize ``image`` channel-wise with ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        apply_ocr=None,
        ocr_lang=None,
        tesseract_config=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Prepare a batch of images; per-call arguments override the defaults
        stored on the instance.  Returns a ``BatchFeature`` with
        ``pixel_values`` plus ``words``/``boxes`` when OCR is enabled."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("If do_normalize is True, image_mean and image_std must be specified.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
| 365 |
"""Helpers for saving/loading FSDP-wrapped models and optimizers in accelerate."""
import os

import torch

from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version

# Distributed-checkpoint machinery only exists on sufficiently new PyTorch.
if is_torch_version(">=", FSDP_PYTORCH_VERSION):
    import torch.distributed.checkpoint as dist_cp
    from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
    from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
    from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
    from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

# The save/load helpers below log through this name; the original bound the
# logger to an unrelated identifier, leaving ``logger`` undefined.
logger = get_logger(__name__)
def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    """Save an FSDP-wrapped model's weights under ``output_dir``.

    Layout depends on ``fsdp_plugin.state_dict_type``: FULL writes a single
    rank-0 file, LOCAL one file per rank, SHARDED a distributed-checkpoint
    directory.  Parameter names restored from the attributes the body reads
    (the original reused one name for every parameter and local).
    """
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            # Full state dict is materialized everywhere but only rank 0 writes.
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")
def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    """Load weights saved by ``save_fsdp_model`` back into ``model``.

    Mirrors the three on-disk layouts (FULL / LOCAL / SHARDED).  For FULL,
    non-zero ranks of an unwrapped model skip loading and rely on
    ``sync_module_states`` to broadcast from rank 0.
    """
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            # load_state_dict fills this dict in place with the checkpoint data.
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
    model.load_state_dict(state_dict)
def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    """Save the optimizer state of an FSDP-wrapped model under ``output_dir``.

    FULL state dicts go to a single rank-0 file; every other state-dict type
    is written as a distributed-checkpoint directory.
    """
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")
def UpperCamelCase_( snake_case : Optional[Any] , snake_case : List[str] , snake_case : Union[str, Any] , snake_case : int , snake_case : Optional[int] , snake_case : Union[str, Any]=0 ):
    # NOTE(review): mangled signature -- all six parameters are named
    # `snake_case` (duplicate parameter names are a SyntaxError) and locals are
    # collapsed onto `snake_case_` while later lines reference the intended
    # names (`accelerator`, `input_dir`, `input_optimizer_file`, `ckpt_dir`,
    # `optim_state`, `optimizer`). Evidently "load FSDP optimizer state from a
    # checkpoint"; TODO recover the original names before this can run.
    '''simple docstring'''
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        snake_case , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            # Full state dict: every rank loads the single `.bin` file.
            snake_case_ = None
            # below check should work but currently it isn't working (mostly opytorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            snake_case_ = (
                f'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else f'{OPTIMIZER_NAME}_{optimizer_index}.bin'
            )
            snake_case_ = os.path.join(snake_case , snake_case )
            logger.info(f'Loading Optimizer state from {input_optimizer_file}' )
            snake_case_ = torch.load(snake_case )
            logger.info(f'Optimizer state loaded from {input_optimizer_file}' )
        else:
            # Sharded checkpoint: each rank reads its shard from the
            # per-optimizer directory (unless input_dir already points at it).
            snake_case_ = (
                os.path.join(snake_case , f'{OPTIMIZER_NAME}_{optimizer_index}' )
                if f'{OPTIMIZER_NAME}' not in input_dir
                else input_dir
            )
            logger.info(f'Loading Optimizer from {ckpt_dir}' )
            snake_case_ = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict() , optimizer_key="optimizer" , storage_reader=dist_cp.FileSystemReader(snake_case ) , )
            snake_case_ = optim_state["optimizer"]
            logger.info(f'Optimizer loaded from {ckpt_dir}' )
        # Convert the gathered/sharded state back into the form the local
        # optimizer instance expects, then load it in place.
        snake_case_ = FSDP.optim_state_dict_to_load(snake_case , snake_case , snake_case )
        optimizer.load_state_dict(snake_case )
| 92 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Lazy-import structure: maps submodule name -> list of public names it
# provides. Consumed by _LazyModule below so heavy submodules are only
# imported on first attribute access.
_import_structure = {
    'configuration_luke': ['LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LukeConfig'],
    'tokenization_luke': ['LukeTokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: simply do not advertise the modeling classes.
    pass
else:
    # Bug fix: the modeling names were previously assigned to a fresh list
    # (clobbering the import-structure dict), and `_import_structure` itself
    # was never defined, so the _LazyModule call below raised NameError.
    # Register them as an entry of the dict instead.
    _import_structure['modeling_luke'] = [
        'LUKE_PRETRAINED_MODEL_ARCHIVE_LIST',
        'LukeForEntityClassification',
        'LukeForEntityPairClassification',
        'LukeForEntitySpanClassification',
        'LukeForMultipleChoice',
        'LukeForQuestionAnswering',
        'LukeForSequenceClassification',
        'LukeForTokenClassification',
        'LukeForMaskedLM',
        'LukeModel',
        'LukePreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
    from .tokenization_luke import LukeTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_luke import (
            LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
            LukeForEntityClassification,
            LukeForEntityPairClassification,
            LukeForEntitySpanClassification,
            LukeForMaskedLM,
            LukeForMultipleChoice,
            LukeForQuestionAnswering,
            LukeForSequenceClassification,
            LukeForTokenClassification,
            LukeModel,
            LukePreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy (kept in `A__` as well
    # for backward compatibility with the previous revision's name).
    A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
    sys.modules[__name__] = A__
| 82 |
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCamelCase : Optional[Any] = {
'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json',
'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json',
}
class __UpperCAmelCase ( PretrainedConfig ):
    """
    Configuration class for an Encodec neural audio codec model.

    Bug fixes relative to the previous revision:
      * every ``__init__`` parameter was named ``_lowerCAmelCase`` (duplicate
        parameter names are a SyntaxError); the original names were recovered
        from the attribute assignments in the body,
      * attributes were bound to a throwaway local ``A`` instead of ``self``,
        so instances were never populated,
      * all four properties shared the name ``A`` (only the last survived) and
        the frame-rate property referenced an undefined ``hop_length``; the
        names ``chunk_length`` / ``frame_rate`` are evidenced by the
        ``self.chunk_length`` and ``self.frame_rate`` uses below,
      * the base class was the undefined name ``A__``; ``PretrainedConfig``
        (imported above, required by ``super().__init__(**kwargs)``) is the
        evident intent.
    """

    # Model identifier used by the Auto* machinery.
    model_type = '''encodec'''

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        # Codebook dimension defaults to the hidden size when unspecified.
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut
        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                F"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""" )
        super().__init__(**kwargs )

    @property
    def chunk_length(self):
        # Chunk length in samples, or None when chunked processing is disabled.
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate )

    @property
    def chunk_stride(self):
        # Stride between chunks in samples (at least 1); None when not chunking.
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )

    @property
    def frame_rate(self):
        # Frames per second after the encoder's total downsampling.
        hop_length = np.prod(self.upsampling_ratios )
        return math.ceil(self.sampling_rate / hop_length )

    @property
    def num_quantizers(self):
        # Number of quantizer codebooks needed for the largest target bandwidth.
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
| 258 | 0 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase :
    """simple docstring"""

    # NOTE(review): identifiers in this tester are mechanically mangled: every
    # __init__ parameter is literally named `__UpperCAmelCase` (duplicate
    # parameter names are a SyntaxError), every local is rebound to
    # `UpperCAmelCase_` while later lines reference the intended names
    # (`parent`, `config_and_inputs`, ...), and all six helper methods share
    # the name `_SCREAMING_SNAKE_CASE`, so only the last definition would
    # survive. The attribute assignments preserve the intended parameter
    # names; per-method comments record the evident intent. The code cannot
    # run as written -- TODO restore the original identifiers.
    def __init__( self : Any , __UpperCAmelCase : str , __UpperCAmelCase : str=13 , __UpperCAmelCase : Tuple=32 , __UpperCAmelCase : Union[str, Any]=2 , __UpperCAmelCase : int=3 , __UpperCAmelCase : List[str]=16 , __UpperCAmelCase : List[str]=[1, 2, 1] , __UpperCAmelCase : Optional[Any]=[2, 2, 4] , __UpperCAmelCase : int=2 , __UpperCAmelCase : Tuple=2.0 , __UpperCAmelCase : Dict=True , __UpperCAmelCase : Any=0.0 , __UpperCAmelCase : List[Any]=0.0 , __UpperCAmelCase : str=0.1 , __UpperCAmelCase : List[Any]="gelu" , __UpperCAmelCase : Union[str, Any]=False , __UpperCAmelCase : Optional[Any]=True , __UpperCAmelCase : Optional[int]=0.02 , __UpperCAmelCase : str=1E-5 , __UpperCAmelCase : Any=True , __UpperCAmelCase : List[Any]=None , __UpperCAmelCase : List[str]=True , __UpperCAmelCase : Optional[int]=10 , __UpperCAmelCase : str=8 , ) -> Optional[int]:
        UpperCAmelCase_= parent
        UpperCAmelCase_= batch_size
        UpperCAmelCase_= image_size
        UpperCAmelCase_= patch_size
        UpperCAmelCase_= num_channels
        UpperCAmelCase_= embed_dim
        UpperCAmelCase_= depths
        UpperCAmelCase_= num_heads
        UpperCAmelCase_= window_size
        UpperCAmelCase_= mlp_ratio
        UpperCAmelCase_= qkv_bias
        UpperCAmelCase_= hidden_dropout_prob
        UpperCAmelCase_= attention_probs_dropout_prob
        UpperCAmelCase_= drop_path_rate
        UpperCAmelCase_= hidden_act
        UpperCAmelCase_= use_absolute_embeddings
        UpperCAmelCase_= patch_norm
        UpperCAmelCase_= layer_norm_eps
        UpperCAmelCase_= initializer_range
        UpperCAmelCase_= is_training
        UpperCAmelCase_= scope
        UpperCAmelCase_= use_labels
        UpperCAmelCase_= type_sequence_label_size
        UpperCAmelCase_= encoder_stride

    # (intended: prepare_config_and_inputs) Random pixel values + optional labels.
    def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
        UpperCAmelCase_= floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        UpperCAmelCase_= None
        if self.use_labels:
            UpperCAmelCase_= ids_tensor([self.batch_size] , self.type_sequence_label_size )
        UpperCAmelCase_= self.get_config()
        return config, pixel_values, labels

    # (intended: get_config) NOTE(review): `path_norm=` below looks like a
    # typo for `patch_norm=` -- confirm against the SwinvaConfig signature.
    def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
        return SwinvaConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )

    # (intended: create_and_check_model) Forward pass; checks last_hidden_state shape.
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict ) -> List[Any]:
        UpperCAmelCase_= SwinvaModel(config=__UpperCAmelCase )
        model.to(__UpperCAmelCase )
        model.eval()
        UpperCAmelCase_= model(__UpperCAmelCase )
        UpperCAmelCase_= ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        UpperCAmelCase_= int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )

    # (intended: create_and_check_for_masked_image_modeling) Also covers the
    # single-channel (greyscale) case.
    def _SCREAMING_SNAKE_CASE ( self : int , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[int] ) -> List[str]:
        UpperCAmelCase_= SwinvaForMaskedImageModeling(config=__UpperCAmelCase )
        model.to(__UpperCAmelCase )
        model.eval()
        UpperCAmelCase_= model(__UpperCAmelCase )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        UpperCAmelCase_= 1
        UpperCAmelCase_= SwinvaForMaskedImageModeling(__UpperCAmelCase )
        model.to(__UpperCAmelCase )
        model.eval()
        UpperCAmelCase_= floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        UpperCAmelCase_= model(__UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )

    # (intended: create_and_check_for_image_classification) Checks logits shape.
    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : List[str] , __UpperCAmelCase : int ) -> Optional[int]:
        UpperCAmelCase_= self.type_sequence_label_size
        UpperCAmelCase_= SwinvaForImageClassification(__UpperCAmelCase )
        model.to(__UpperCAmelCase )
        model.eval()
        UpperCAmelCase_= model(__UpperCAmelCase , labels=__UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    # (intended: prepare_config_and_inputs_for_common)
    def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str:
        UpperCAmelCase_= self.prepare_config_and_inputs()
        UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= config_and_inputs
        UpperCAmelCase_= {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class lowercase ( snake_case__ , snake_case__ , unittest.TestCase):
    """simple docstring"""

    # NOTE(review): mangled identifiers throughout this class: the base
    # classes are literally `snake_case__` (undefined here; presumably the
    # ModelTesterMixin and PipelineTesterMixin imported above), the four
    # boolean class attributes and the two tuple/dict attributes all share the
    # name `a__` (each assignment overwrites the previous one), every local is
    # rebound to `UpperCAmelCase_` while later lines reference the intended
    # names, and most test methods share the name `_SCREAMING_SNAKE_CASE`.
    # Per-method comments record the evident intent; the code cannot run as
    # written -- TODO restore the original identifiers.
    a__ : Optional[int] = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    a__ : Dict = (
        {"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )
    a__ : Union[str, Any] = False
    a__ : int = False
    a__ : Optional[int] = False
    a__ : Optional[int] = False

    # (intended: setUp) Builds the model tester and config tester fixtures.
    def _SCREAMING_SNAKE_CASE ( self : int ) -> Any:
        UpperCAmelCase_= SwinvaModelTester(self )
        UpperCAmelCase_= ConfigTester(self , config_class=__UpperCAmelCase , embed_dim=37 )

    # (intended: test_config) Standard config round-trip checks.
    def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    # (intended: test_model)
    def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
        UpperCAmelCase_= self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__UpperCAmelCase )

    @unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" )
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
        pass

    @unittest.skip(reason="""Swinv2 does not use inputs_embeds""" )
    def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
        pass

    # (intended: test_model_common_attributes) Input embeddings must be an
    # nn.Module; output embeddings must be None or nn.Linear.
    def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str:
        UpperCAmelCase_, UpperCAmelCase_= self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase_= model_class(__UpperCAmelCase )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            UpperCAmelCase_= model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) )

    # (intended: test_forward_signature) First forward arg must be pixel_values.
    def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
        UpperCAmelCase_, UpperCAmelCase_= self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase_= model_class(__UpperCAmelCase )
            UpperCAmelCase_= inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCAmelCase_= [*signature.parameters.keys()]
            UpperCAmelCase_= ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , __UpperCAmelCase )

    # (intended: test_attention_outputs) Checks attention tensors are returned
    # with the expected count and per-window shapes, both when requested via
    # kwargs and via config, and that attentions come last in the outputs.
    def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
        UpperCAmelCase_, UpperCAmelCase_= self.model_tester.prepare_config_and_inputs_for_common()
        UpperCAmelCase_= True
        for model_class in self.all_model_classes:
            UpperCAmelCase_= True
            UpperCAmelCase_= False
            UpperCAmelCase_= True
            UpperCAmelCase_= model_class(__UpperCAmelCase )
            model.to(__UpperCAmelCase )
            model.eval()
            with torch.no_grad():
                UpperCAmelCase_= model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
            UpperCAmelCase_= outputs.attentions
            UpperCAmelCase_= len(self.model_tester.depths )
            self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            UpperCAmelCase_= True
            UpperCAmelCase_= config.window_size**2
            UpperCAmelCase_= model_class(__UpperCAmelCase )
            model.to(__UpperCAmelCase )
            model.eval()
            with torch.no_grad():
                UpperCAmelCase_= model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
            UpperCAmelCase_= outputs.attentions
            self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
            UpperCAmelCase_= len(__UpperCAmelCase )
            # Check attention is always last and order is fine
            UpperCAmelCase_= True
            UpperCAmelCase_= True
            UpperCAmelCase_= model_class(__UpperCAmelCase )
            model.to(__UpperCAmelCase )
            model.eval()
            with torch.no_grad():
                UpperCAmelCase_= model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
            if hasattr(self.model_tester , """num_hidden_states_types""" ):
                UpperCAmelCase_= self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                UpperCAmelCase_= 2
            self.assertEqual(out_len + added_hidden_states , len(__UpperCAmelCase ) )
            UpperCAmelCase_= outputs.attentions
            self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase )
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )

    # (intended: check_hidden_states_output) Shared helper: verifies count and
    # shapes of hidden_states and reshaped_hidden_states.
    def _SCREAMING_SNAKE_CASE ( self : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : List[Any] , __UpperCAmelCase : int , __UpperCAmelCase : Any ) -> Dict:
        UpperCAmelCase_= model_class(__UpperCAmelCase )
        model.to(__UpperCAmelCase )
        model.eval()
        with torch.no_grad():
            UpperCAmelCase_= model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
        UpperCAmelCase_= outputs.hidden_states
        UpperCAmelCase_= getattr(
            self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase )
        # Swinv2 has a different seq_length
        UpperCAmelCase_= (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        UpperCAmelCase_= (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
        UpperCAmelCase_= outputs.reshaped_hidden_states
        self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase )
        UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= reshaped_hidden_states[0].shape
        UpperCAmelCase_= (
            reshaped_hidden_states[0].view(__UpperCAmelCase , __UpperCAmelCase , height * width ).permute(0 , 2 , 1 )
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )

    # (intended: test_hidden_states_output) Exercises the helper above with
    # hidden states requested via kwargs and via config.
    def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
        UpperCAmelCase_, UpperCAmelCase_= self.model_tester.prepare_config_and_inputs_for_common()
        UpperCAmelCase_= (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            UpperCAmelCase_= True
            self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            UpperCAmelCase_= True
            self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )

    # (intended: test_hidden_states_output_with_padding) Same, with an image
    # size padded up to a multiple of the patch size.
    def _SCREAMING_SNAKE_CASE ( self : Any ) -> str:
        UpperCAmelCase_, UpperCAmelCase_= self.model_tester.prepare_config_and_inputs_for_common()
        UpperCAmelCase_= 3
        UpperCAmelCase_= (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        UpperCAmelCase_= (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        UpperCAmelCase_= image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        UpperCAmelCase_= image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            UpperCAmelCase_= True
            self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            UpperCAmelCase_= True
            self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , (padded_height, padded_width) )

    # (intended: test_for_masked_image_modeling)
    def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
        UpperCAmelCase_= self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*__UpperCAmelCase )

    # (intended: test_for_image_classification)
    def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
        UpperCAmelCase_= self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )

    # (intended: test_model_from_pretrained) Loads the first reference checkpoint.
    @slow
    def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase_= SwinvaModel.from_pretrained(__UpperCAmelCase )
            self.assertIsNotNone(__UpperCAmelCase )

    # (intended: test_initialization) With zero-init config, every trainable
    # non-embedding parameter mean must round to 0.0 or 1.0.
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
        UpperCAmelCase_, UpperCAmelCase_= self.model_tester.prepare_config_and_inputs_for_common()
        UpperCAmelCase_= _config_zero_init(__UpperCAmelCase )
        for model_class in self.all_model_classes:
            UpperCAmelCase_= model_class(config=__UpperCAmelCase )
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class lowercase ( unittest.TestCase):
    """simple docstring"""

    # NOTE(review): both methods below share the mangled name
    # `_SCREAMING_SNAKE_CASE`; the first is evidently `default_image_processor`
    # (a cached_property that the second method reads as
    # `self.default_image_processor`), the second a slow integration test.
    # Locals in the test are collapsed onto `UpperCAmelCase_` while later
    # lines reference the intended names (`model`, `image_processor`,
    # `outputs`) -- TODO restore the original identifiers.
    @cached_property
    def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
        # Image processor for the reference checkpoint, or None if the vision
        # dependencies are unavailable.
        return (
            AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" )
            if is_vision_available()
            else None
        )

    # Integration test: classify one COCO fixture image and compare the first
    # three logits against precomputed reference values.
    @slow
    def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
        UpperCAmelCase_= SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to(
            __UpperCAmelCase )
        UpperCAmelCase_= self.default_image_processor
        UpperCAmelCase_= Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        UpperCAmelCase_= image_processor(images=__UpperCAmelCase , return_tensors="""pt""" ).to(__UpperCAmelCase )
        # forward pass
        with torch.no_grad():
            UpperCAmelCase_= model(**__UpperCAmelCase )
        # verify the logits
        UpperCAmelCase_= torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
        UpperCAmelCase_= torch.tensor([-0.3_947, -0.4_306, 0.0_026] ).to(__UpperCAmelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
| 277 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class lowercase ( snake_case__):
    """
    Dataset reader that materializes a Hugging Face dataset from a Spark
    DataFrame via the packaged ``Spark`` builder imported above.

    NOTE(review): identifiers are mangled: the base class is the undefined
    name ``snake_case__`` (presumably ``AbstractDatasetReader``, imported
    above), all ``__init__`` parameters share the name ``__UpperCAmelCase``
    (duplicate parameter names are a SyntaxError), and the three assignments
    below bind to a local ``UpperCAmelCase_`` instead of the
    ``self._load_from_cache_file`` / ``self._file_format`` / ``self.builder``
    attributes that the second method reads. TODO restore original names.
    """

    def __init__( self : int , __UpperCAmelCase : pyspark.sql.DataFrame , __UpperCAmelCase : Optional[NamedSplit] = None , __UpperCAmelCase : Optional[Features] = None , __UpperCAmelCase : bool = True , __UpperCAmelCase : str = None , __UpperCAmelCase : bool = False , __UpperCAmelCase : str = None , __UpperCAmelCase : bool = True , __UpperCAmelCase : str = "arrow" , **__UpperCAmelCase : str , ) -> Dict:
        super().__init__(
            split=__UpperCAmelCase , features=__UpperCAmelCase , cache_dir=__UpperCAmelCase , keep_in_memory=__UpperCAmelCase , streaming=__UpperCAmelCase , **__UpperCAmelCase , )
        UpperCAmelCase_= load_from_cache_file
        UpperCAmelCase_= file_format
        UpperCAmelCase_= Spark(
            df=__UpperCAmelCase , features=__UpperCAmelCase , cache_dir=__UpperCAmelCase , working_dir=__UpperCAmelCase , **__UpperCAmelCase , )

    # (intended: read) Return the dataset: streams when self.streaming is set,
    # otherwise downloads/prepares via the Spark builder, forcing a re-download
    # when loading from cache is disabled.
    def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split )
        UpperCAmelCase_= None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=__UpperCAmelCase , file_format=self._file_format , )
        return self.builder.as_dataset(split=self.split )
| 277 | 1 |
import numpy as np
def _lowerCamelCase( lowercase__ ) -> Optional[int]:
'''simple docstring'''
return 1 / (1 + np.exp(-vector ))
def _lowerCamelCase( lowercase__ ) -> Any:
'''simple docstring'''
return vector * sigmoid(1.702 * vector )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 295 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case : Optional[int] = logging.get_logger(__name__)
_snake_case : Optional[int] = {
'google/vivit-b-16x2-kinetics400': (
'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class _UpperCAmelCase ( PretrainedConfig ):
    """
    Configuration for a ViViT (video vision transformer) model.

    Bug fixes relative to the previous revision: all ``__init__`` parameters
    shared the name ``__UpperCamelCase`` (duplicate parameter names are a
    SyntaxError), attributes were bound to a throwaway local ``A`` instead of
    ``self``, and the base class was the undefined name ``lowercase_``.
    Parameter names were recovered from the attribute assignments;
    ``PretrainedConfig`` (imported above, required by
    ``super().__init__(**kwargs)``) is the evident base class.
    """

    # Model identifier used by the Auto* machinery (previously stored in the
    # mangled class attribute ``UpperCamelCase``).
    model_type = '''vivit'''

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs )
| 292 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
"""EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json""",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class _A ( PretrainedConfig ):
    """
    Configuration for a GPT-NeoX model.

    Bug fixes relative to the previous revision:
      * all ``__init__`` parameters shared the name ``__UpperCAmelCase``
        (duplicate parameter names are a SyntaxError); the originals were
        recovered from the attribute assignments,
      * the validation helper was named ``__snake_case`` while ``__init__``
        calls ``self._rope_scaling_validation()``; renamed to match,
      * inside the validation, locals were collapsed onto ``a`` while later
        lines referenced ``rope_scaling_type`` / ``rope_scaling_factor``, and
        the ``isinstance`` checks used the undefined name ``__UpperCAmelCase``
        instead of ``dict`` / ``float``,
      * the base class was the undefined name ``_a``; ``PretrainedConfig``
        (imported above) is the evident intent.
    """

    # Model identifier used by the Auto* machinery (previously stored in the
    # mangled class attribute ``UpperCAmelCase``).
    model_type = """gpt_neox"""

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisble by the number of attention heads! Make sure to update them!")

    def _rope_scaling_validation(self):
        """Validate ``rope_scaling``: must be None or {"type": ..., "factor": ...}."""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                f'''got {self.rope_scaling}''')
        rope_scaling_type = self.rope_scaling.get("type" , None)
        rope_scaling_factor = self.rope_scaling.get("factor" , None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''')
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float) or rope_scaling_factor <= 1.0:
            raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''')
| 226 |
"""simple docstring"""
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    """Build a tiny (model, optimizer, scheduler, train_dl, valid_dl) fixture.

    Bug fix: locals were mangled onto a single name ``a`` while later lines
    referenced the intended names (``model.parameters()``, the undefined
    ``A_`` passed to OneCycleLR), so the function raised NameError. The name
    ``create_components`` is evidenced by the call sites in the test class
    below.
    """
    model = torch.nn.Linear(2 , 4 )
    optimizer = torch.optim.AdamW(model.parameters() , lr=1.0 )
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer , max_lr=0.0_1 , steps_per_epoch=2 , epochs=1 )
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
    return model, optimizer, scheduler, train_dl, valid_dl


# Backward-compat alias: the previous (mangled) definition was named `lowercase`.
lowercase = create_components
def get_signature(model):
    """Return a cheap scalar fingerprint of the model's weights: sum(|W|) + sum(|b|).

    Bug fix: the parameter was named ``A_`` while the body referenced
    ``model``. The name ``get_signature`` is evidenced by the call sites in
    the test class below.
    """
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()


# Backward-compat alias: the previous (mangled) definition was named `lowercase`.
lowercase = get_signature
def load_random_weights(model):
    """Overwrite ``model``'s parameters in place with freshly initialized ones.

    Bug fix: the fresh state dict was bound to a throwaway local ``a`` while
    ``load_state_dict`` was called with the undefined name ``A_``. The name
    ``load_random_weights`` is evidenced by the call sites in the test class
    below.
    """
    # Build a same-shaped Linear (weight is (out, in), so .T gives (in, out))
    # and steal its randomly initialized state dict.
    fresh_state = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
    model.load_state_dict(fresh_state )


# Backward-compat alias: the previous (mangled) definition was named `lowercase`.
lowercase = load_random_weights
class _A ( _a ):
"""simple docstring"""
@require_cuda
def __snake_case ( self : Any):
a : List[str] = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(__UpperCAmelCase):
a : Any = Accelerator(cpu=__UpperCAmelCase)
def __snake_case ( self : List[Any]):
a : str = Accelerator()
a : Optional[Any] = GradientState()
assert state.num_steps == 1
a : Dict = 4
assert state.num_steps == 4
assert state.sync_gradients is True
a : Optional[int] = False
assert state.sync_gradients is False
GradientState._reset_state()
def __snake_case ( self : str):
a : int = Accelerator()
a , a , a , a , a : Tuple = create_components()
(
(
a
) , (
a
) , (
a
) , (
a
) , (
a
) ,
) : Tuple = accelerator.prepare(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
self.assertTrue(prepared_model in accelerator._models)
self.assertTrue(prepared_optimizer in accelerator._optimizers)
self.assertTrue(prepared_scheduler in accelerator._schedulers)
self.assertTrue(prepared_train_dl in accelerator._dataloaders)
self.assertTrue(prepared_valid_dl in accelerator._dataloaders)
def __snake_case ( self : Dict):
a : Dict = Accelerator()
a , a , a , a , a : Any = create_components()
accelerator.prepare(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
accelerator.free_memory()
self.assertTrue(len(accelerator._models) == 0)
self.assertTrue(len(accelerator._optimizers) == 0)
self.assertTrue(len(accelerator._schedulers) == 0)
self.assertTrue(len(accelerator._dataloaders) == 0)
def __snake_case ( self : int):
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Any):
pass
with patch("torch.cuda.set_device" , __UpperCAmelCase), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64"):
a : int = Accelerator()
self.assertEqual(str(accelerator.state.device) , "cuda:64")
def __snake_case ( self : List[str]):
a : Tuple = Accelerator()
a , a , a , a , a : Optional[Any] = create_components()
accelerator.prepare(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
a : Dict = get_signature(__UpperCAmelCase)
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__UpperCAmelCase)
# make sure random weights don't match
load_random_weights(__UpperCAmelCase)
self.assertTrue(abs(model_signature - get_signature(__UpperCAmelCase)) > 1e-3)
# make sure loaded weights match
accelerator.load_state(__UpperCAmelCase)
self.assertTrue(abs(model_signature - get_signature(__UpperCAmelCase)) < 1e-3)
def __snake_case ( self : Optional[int]):
a : str = Accelerator()
a , a , a , a , a : Dict = create_components()
accelerator.prepare(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
a : Union[str, Any] = get_signature(__UpperCAmelCase)
# saving hook
def save_config(__UpperCAmelCase : Tuple , __UpperCAmelCase : int , __UpperCAmelCase : Union[str, Any]):
a : Tuple = {"class_name": models[0].__class__.__name__}
with open(os.path.join(__UpperCAmelCase , "data.json") , "w") as f:
json.dump(__UpperCAmelCase , __UpperCAmelCase)
# loading hook
def load_config(__UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any]):
with open(os.path.join(__UpperCAmelCase , "data.json") , "r") as f:
a : Optional[Any] = json.load(__UpperCAmelCase)
a : Tuple = config["class_name"]
a : Optional[int] = accelerator.register_save_state_pre_hook(__UpperCAmelCase)
a : Tuple = accelerator.register_load_state_pre_hook(__UpperCAmelCase)
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__UpperCAmelCase)
# make sure random weights don't match with hooks
load_random_weights(__UpperCAmelCase)
self.assertTrue(abs(model_signature - get_signature(__UpperCAmelCase)) > 1e-3)
# random class name to verify correct one is loaded
a : int = "random"
# make sure loaded weights match with hooks
accelerator.load_state(__UpperCAmelCase)
self.assertTrue(abs(model_signature - get_signature(__UpperCAmelCase)) < 1e-3)
# mode.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__)
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__UpperCAmelCase)
# make sure random weights don't match with hooks removed
load_random_weights(__UpperCAmelCase)
self.assertTrue(abs(model_signature - get_signature(__UpperCAmelCase)) > 1e-3)
# random class name to verify correct one is loaded
a : Dict = "random"
# make sure loaded weights match with hooks removed
accelerator.load_state(__UpperCAmelCase)
self.assertTrue(abs(model_signature - get_signature(__UpperCAmelCase)) < 1e-3)
# mode.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__)
def __snake_case ( self : Optional[Any]):
a : List[str] = Accelerator()
a , a , a , a , a : int = create_components()
a : Tuple = None
# This should work
a , a , a , a , a , a : Any = accelerator.prepare(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
self.assertTrue(dummy_obj is None)
def __snake_case ( self : List[str]):
a : str = Accelerator()
a , a , a , a , a : List[Any] = create_components()
a : Union[str, Any] = [1, 2, 3]
# This should work
a , a , a , a , a , a : str = accelerator.prepare(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
self.assertEqual(
getattr(__UpperCAmelCase , "_is_accelerate_prepared" , __UpperCAmelCase) , __UpperCAmelCase , "Dummy object should have `_is_accelerate_prepared` set to `True`" , )
self.assertEqual(
getattr(__UpperCAmelCase , "_is_accelerate_prepared" , __UpperCAmelCase) , __UpperCAmelCase , "Model is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(__UpperCAmelCase , "_is_accelerate_prepared" , __UpperCAmelCase) , __UpperCAmelCase , "Optimizer is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(__UpperCAmelCase , "_is_accelerate_prepared" , __UpperCAmelCase) , __UpperCAmelCase , "Scheduler is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(__UpperCAmelCase , "_is_accelerate_prepared" , __UpperCAmelCase) , __UpperCAmelCase , "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(__UpperCAmelCase , "_is_accelerate_prepared" , __UpperCAmelCase) , __UpperCAmelCase , "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , )
@slow
@require_bnb
def __snake_case ( self : Optional[int]):
from transformers import AutoModelForCausalLM
a : Optional[int] = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=__UpperCAmelCase , device_map={"": 0} , )
a : Tuple = Accelerator()
# This should work
a : List[Any] = accelerator.prepare(__UpperCAmelCase)
@slow
@require_bnb
def __snake_case ( self : Optional[int]):
from transformers import AutoModelForCausalLM
a : Dict = Accelerator()
with init_empty_weights():
a : Any = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
model.tie_weights()
a : Union[str, Any] = infer_auto_device_map(__UpperCAmelCase)
a : str = "cpu"
a : int = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , device_map=__UpperCAmelCase , load_in_abit=__UpperCAmelCase , llm_inta_enable_fpaa_cpu_offload=__UpperCAmelCase)
# This should not work and get value error
with self.assertRaises(__UpperCAmelCase):
a : Optional[int] = accelerator.prepare(__UpperCAmelCase)
@slow
@require_bnb
@require_multi_gpu
def __snake_case ( self : Optional[int]):
from transformers import AutoModelForCausalLM
a : Union[str, Any] = {"distributed_type": DistributedType.MULTI_GPU}
with init_empty_weights():
a : List[str] = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
model.tie_weights()
a : Any = infer_auto_device_map(__UpperCAmelCase)
a : Dict = 1
a : Any = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=__UpperCAmelCase , device_map=__UpperCAmelCase , )
a : int = Accelerator()
# This should not work and get value error
with self.assertRaises(__UpperCAmelCase):
a : Optional[int] = accelerator.prepare(__UpperCAmelCase)
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def __snake_case ( self : Tuple):
from transformers import AutoModelForCausalLM
with init_empty_weights():
a : List[str] = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
a : Tuple = infer_auto_device_map(__UpperCAmelCase)
a : str = 1
a : Optional[int] = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=__UpperCAmelCase , device_map=__UpperCAmelCase , )
a : str = Accelerator()
# This should work
a : Any = accelerator.prepare(__UpperCAmelCase)
@require_cuda
def __snake_case ( self : List[Any]):
a : Tuple = torch.nn.Linear(10 , 10)
a : int = torch.optim.SGD(model.parameters() , lr=0.01)
a : Optional[Any] = Accelerator(cpu=__UpperCAmelCase)
a : List[str] = accelerator.prepare(__UpperCAmelCase)
| 226 | 1 |
"""simple docstring"""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def UpperCamelCase(number_of_simulations: int) -> None:
    """Monte Carlo estimate of pi: sample points in [-1, 1]^2 and count hits inside the unit circle.

    Prints the estimate and its error; returns nothing (the original `List[Any]`
    annotation was wrong — the function only prints).
    """

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(number_of_simulations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")
def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo estimate of the integral of `function_to_integrate` on [min_value, max_value].

    The mangled original named every parameter `UpperCAmelCase` (a SyntaxError) while the
    body still read `max_value - min_value`; the parameter names are restored here.
    """
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


# Preserve the original (mangled) module-level name.
UpperCamelCase = area_under_curve_estimator
def UpperCamelCase(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    """Demo: Monte Carlo estimate of the area under y = x on [min_value, max_value].

    The mangled original declared duplicate `UpperCAmelCase` parameters (a SyntaxError);
    the names are restored from the body's own references.
    """

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    # Exact value of the integral of x dx on the interval.
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")
def UpperCamelCase(iterations: int) -> None:
    """Demo: estimate pi as the area under sqrt(4 - x^2) on [0, 2] (a quarter circle of radius 2).

    The mangled original's inner function took a parameter named `UpperCAmelCase` but
    read `x`, which was undefined; the parameter name is restored.
    """

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    # (A dataset-residue token fused onto the last line has been removed.)
    import doctest

    doctest.testmod()
"""Lazy-import structure for the encoder-decoder model family."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

# Maps submodule name -> public symbols it exports; consumed by _LazyModule below.
# The mangled original bound every piece to one clobbered name and then referenced
# an undefined `_import_structure`, which would raise NameError at import time.
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_encoder_decoder import EncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encoder_decoder import EncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_encoder_decoder import TFEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel

else:
    import sys

    # Replace the module with a lazy proxy so heavy backends load only on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def lowerCamelCase(number: int) -> str:
    """Return the two's-complement binary string ("0b...") of a non-positive integer.

    The mangled original took a parameter `a_` but read the undefined names
    `number` and `_snake_case`; the parameter name is restored.

    Raises:
        ValueError: if ``number`` is positive.
    """
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    # abs(number) - 2**length is the negative value whose magnitude, in binary,
    # is exactly the two's-complement bit pattern of `number` (minus the sign bit).
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"  # number == 0 has no sign bit to set
    )
    return "0b" + twos_complement_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Merge two equal-length bit strings that differ in at most one position.

    Returns the merged string with the differing bit replaced by "_", or
    False when the strings differ in more than one position. The mangled
    original declared two parameters both named `a_` (a SyntaxError).
    """
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


# Preserve the original (mangled) module-level name.
lowerCamelCase = compare_string
def check(binary: list[str]) -> list[str]:
    """Repeatedly combine the given minterm strings, returning the prime implicants.

    Mangled assignment targets (`check1[i]`, `check1[j]`, `binary`) are restored
    from the body's own references.
    """
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                # NOTE(review): marking on `k is False` mirrors the mangled original;
                # it looks inverted for Quine-McCluskey merging — confirm upstream.
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


lowerCamelCase = check
def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    """Render each minterm as a `no_of_variable`-bit string (LSB computed first, prepended).

    The mangled original declared two parameters both named `a_` (a SyntaxError).
    """
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            # Prepend the current low bit, then shift the minterm right.
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


lowerCamelCase = decimal_to_binary
def is_for_table(string1: str, string2: str, count: int) -> bool:
    """Return True when the two strings differ in exactly `count` positions.

    Used to decide whether a prime implicant (with `count` don't-care "_" bits)
    covers a minterm. The mangled original declared three parameters all named
    `a_` (a SyntaxError).
    """
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


lowerCamelCase = is_for_table
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Select essential prime implicants from the coverage chart.

    First picks implicants that are the sole cover of some minterm column, then
    greedily takes the implicant covering the most remaining columns. Mangled
    assignment targets (`rem`, `select[rem]`, `chart[...]`) are restored.
    """
    temp = []
    select = [0] * len(chart)
    # Columns covered by exactly one row mark that row as essential.
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    # Take every essential row and zero out the columns it covers.
    for i in range(len(chart)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # Greedy cover of whatever columns remain.
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


lowerCamelCase = selection
def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """Build the coverage chart: chart[i][j] == 1 iff implicant i covers minterm j.

    The mangled original declared two parameters both named `a_` (a SyntaxError).
    """
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        # Number of don't-care bits in this implicant.
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


lowerCamelCase = prime_implicant_chart
def main() -> None:
    """Interactive driver: read minterms, print prime and essential prime implicants.

    The mangled original read `float(a_)` (undefined) in the comprehension and
    called siblings by their un-mangled names; both are restored.
    """
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


lowerCamelCase = main
if __name__ == "__main__":
    # Run doctests, then the interactive driver.
    # (A dataset-residue token fused after `main()` has been removed.)
    import doctest

    doctest.testmod()
    main()
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

# Maps submodule name -> public symbols; consumed by _LazyModule below.
# The mangled original bound everything to one clobbered name (`A__`) and then
# referenced an undefined `_import_structure`, which would raise NameError.
_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]

if TYPE_CHECKING:
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )

else:
    import sys

    # Replace the module with a lazy proxy so torch loads only on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
A__ = [0, 2, 4, 6, 8]
A__ = [1, 3, 5, 7, 9]
def _UpperCAmelCase ( snake_case , snake_case , snake_case , snake_case ):
"""simple docstring"""
if remaining_length == 0:
if digits[0] == 0 or digits[-1] == 0:
return 0
for i in range(length // 2 - 1 , -1 , -1 ):
remainder += digits[i] + digits[length - i - 1]
if remainder % 2 == 0:
return 0
remainder //= 10
return 1
if remaining_length == 1:
if remainder % 2 == 0:
return 0
_lowerCAmelCase = 0
for digit in range(10 ):
_lowerCAmelCase = digit
result += reversible_numbers(
0 , (remainder + 2 * digit) // 10 , snake_case , snake_case )
return result
_lowerCAmelCase = 0
for digita in range(10 ):
_lowerCAmelCase = digita
if (remainder + digita) % 2 == 0:
_lowerCAmelCase = ODD_DIGITS
else:
_lowerCAmelCase = EVEN_DIGITS
for digita in other_parity_digits:
_lowerCAmelCase = digita
result += reversible_numbers(
remaining_length - 2 , (remainder + digita + digita) // 10 , snake_case , snake_case , )
return result
def _UpperCAmelCase ( snake_case = 9 ):
"""simple docstring"""
_lowerCAmelCase = 0
for length in range(1 , max_power + 1 ):
result += reversible_numbers(snake_case , 0 , [0] * length , snake_case )
return result
if __name__ == "__main__":
    # Print the Project Euler 145 answer when run as a script.
    # (A dataset-residue token fused onto the trailing line has been removed.)
    print(f"{solution() = }")
"""simple docstring"""
from __future__ import annotations
class __SCREAMING_SNAKE_CASE :
def __init__( self : Dict , snake_case : int ):
'''simple docstring'''
A__ : List[Any] = order
# a_{0} ... a_{k}
A__ : List[Any] = [1.0] + [0.0] * order
# b_{0} ... b_{k}
A__ : str = [1.0] + [0.0] * order
# x[n-1] ... x[n-k]
A__ : Union[str, Any] = [0.0] * self.order
# y[n-1] ... y[n-k]
A__ : List[str] = [0.0] * self.order
def _UpperCamelCase ( self : Optional[int] , snake_case : list[float] , snake_case : list[float] ):
'''simple docstring'''
if len(snake_case ) < self.order:
A__ : Any = [1.0, *a_coeffs]
if len(snake_case ) != self.order + 1:
A__ : str = (
F'Expected a_coeffs to have {self.order + 1} elements '
F'for {self.order}-order filter, got {len(snake_case )}'
)
raise ValueError(snake_case )
if len(snake_case ) != self.order + 1:
A__ : Union[str, Any] = (
F'Expected b_coeffs to have {self.order + 1} elements '
F'for {self.order}-order filter, got {len(snake_case )}'
)
raise ValueError(snake_case )
A__ : Dict = a_coeffs
A__ : Any = b_coeffs
def _UpperCamelCase ( self : List[str] , snake_case : float ):
'''simple docstring'''
A__ : str = 0.0
# Start at index 1 and do index 0 at the end.
for i in range(1 , self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
A__ : Dict = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
A__ : Tuple = self.input_history[:-1]
A__ : int = self.output_history[:-1]
A__ : Dict = sample
A__ : Tuple = result
return result
| 296 |
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class __SCREAMING_SNAKE_CASE:
    """Builds tiny MRA configs and inputs for the model tests below.

    The mangled original declared every parameter of every method with the same
    name (a SyntaxError) and named every method `_UpperCamelCase`, while its own
    bodies call `self.get_config()` / `self.prepare_config_and_inputs()`; the
    method and parameter names are restored from those call sites.
    """

    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Build a config plus random ids/masks/labels sized per the tester attributes."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,  # NOTE(review): mangled in the original — reconstructed as False
            initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        # NOTE(review): mangled assignment target — upstream enlarges the vocab for pipelines.
        config.vocab_size = 300
        return config

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


# The test class below instantiates `MraModelTester`, but a later class shadows
# this class's mangled name; bind the expected name explicitly.
MraModelTester = __SCREAMING_SNAKE_CASE
@require_torch
class __SCREAMING_SNAKE_CASE(UpperCamelCase, unittest.TestCase):
    """MRA model test suite.

    NOTE(review): the mangled base name `UpperCamelCase` is presumably
    ModelTesterMixin — confirm. Method names are restored to `test_*` because
    the original bound every method to one name, which breaks unittest
    discovery entirely; `setUp` in particular must use that exact name to run.
    """

    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    # NOTE(review): these attribute names are reconstructed from the upstream MRA
    # test — the mangled original bound all five values to one name.
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
@require_torch
class __SCREAMING_SNAKE_CASE(unittest.TestCase):
    """Slow integration tests against pretrained MRA checkpoints.

    Method names are restored to unique `test_*` identifiers — the mangled
    original bound all three to one name, so only the last would ever run.
    """

    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 5_0265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 5_0265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
"""simple docstring"""
from math import log
from scipy.constants import Boltzmann, physical_constants
lowerCamelCase_ : Dict = 3_0_0 # TEMPERATURE (unit = K)
def _A ( lowercase , lowercase , lowercase , ):
"""simple docstring"""
if donor_conc <= 0:
raise ValueError('''Donor concentration should be positive''' )
elif acceptor_conc <= 0:
raise ValueError('''Acceptor concentration should be positive''' )
elif intrinsic_conc <= 0:
raise ValueError('''Intrinsic concentration should be positive''' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'''Donor concentration should be greater than intrinsic concentration''' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'''Acceptor concentration should be greater than intrinsic concentration''' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    # (A dataset-residue token fused onto the last line has been removed.)
    import doctest

    doctest.testmod()
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the TPU launcher's command line.

    NOTE(review): renamed from the obfuscated ``_A`` — the second ``_A``
    definition below shadowed this one and calls ``parse_args()``, which did
    not exist.  The original body also bound the parser to a throwaway name
    and used an undefined ``lowercase`` as the argparse ``type``; the real
    types are restored from the help texts, and ``nargs`` uses the
    ``REMAINDER`` sentinel already imported at the top of the file.
    """
    parser = ArgumentParser(
        description=(
            'PyTorch TPU distributed training launch '
            'helper utility that will spawn up '
            'multiple distributed processes'
        ))
    # Optional arguments for the launch helper
    parser.add_argument('--num_cores', type=int, default=1, help='Number of TPU cores to use (1 or 8).')
    # positional
    parser.add_argument(
        'training_script', type=str, help=(
            'The full path to the single TPU training '
            'program/script to be launched in parallel, '
            'followed by all the arguments for the '
            'training script'
        ), )
    # rest from the training program
    parser.add_argument('training_script_args', nargs=REMAINDER)
    return parser.parse_args()


# Preserve the obfuscated public alias for backward compatibility.
_A = parse_args
def _A():
    """Launcher entry point: import the user script and spawn its ``_mp_fn``
    across the requested number of TPU cores.

    NOTE(review): local names are restored from the reads in the obfuscated
    body (``args``/``script_fpath``/``mod``), which were all bound to the
    throwaway ``a``.
    """
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv so the imported script sees its own arguments.
    sys.argv = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
    # ``main`` was never defined in the obfuscated file — the entry point is
    # the second ``_A`` above.  Also removed a stray "| 81 | 1 |" data-table
    # fragment that was fused onto this line.
    _A()
'''simple docstring'''
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
# T5X checkpoint flavor to convert.  NOTE(review): before obfuscation this
# constant was named ``MODEL`` — the CLI default near the bottom of the file
# still interpolates ``{MODEL}``, which is undefined here.
lowerCamelCase = """base_with_context"""
def _A ( _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
__lowercase =nn.Parameter(torch.FloatTensor(weights['token_embedder']['embedding'] ) )
__lowercase =nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=_lowerCAmelCase )
for lyr_num, lyr in enumerate(model.encoders ):
__lowercase =weights[f"""layers_{lyr_num}"""]
__lowercase =nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
__lowercase =ly_weight['attention']
__lowercase =nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
__lowercase =nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
__lowercase =nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
__lowercase =nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
__lowercase =nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
__lowercase =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
__lowercase =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
__lowercase =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
__lowercase =nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def _A ( _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
__lowercase =nn.Parameter(torch.FloatTensor(weights['input_proj']['kernel'].T ) )
__lowercase =nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=_lowerCAmelCase )
for lyr_num, lyr in enumerate(model.encoders ):
__lowercase =weights[f"""layers_{lyr_num}"""]
__lowercase =ly_weight['attention']
__lowercase =nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
__lowercase =nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
__lowercase =nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
__lowercase =nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
__lowercase =nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
__lowercase =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
__lowercase =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
__lowercase =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
__lowercase =nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
__lowercase =nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def _A ( _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
__lowercase =nn.Parameter(torch.FloatTensor(weights['time_emb_dense0']['kernel'].T ) )
__lowercase =nn.Parameter(torch.FloatTensor(weights['time_emb_dense1']['kernel'].T ) )
__lowercase =nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=_lowerCAmelCase )
__lowercase =nn.Parameter(
torch.FloatTensor(weights['continuous_inputs_projection']['kernel'].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
__lowercase =weights[f"""layers_{lyr_num}"""]
__lowercase =nn.Parameter(
torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale'] ) )
__lowercase =nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T ) )
__lowercase =ly_weight['self_attention']
__lowercase =nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
__lowercase =nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
__lowercase =nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
__lowercase =nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
__lowercase =ly_weight['MultiHeadDotProductAttention_0']
__lowercase =nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
__lowercase =nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
__lowercase =nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
__lowercase =nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
__lowercase =nn.Parameter(
torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale'] ) )
__lowercase =nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
__lowercase =nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T ) )
__lowercase =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
__lowercase =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
__lowercase =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
__lowercase =nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale'] ) )
__lowercase =nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T ) )
return model
def _A ( _lowerCAmelCase ):
    """Convert a Music Spectrogram Diffusion T5X checkpoint into a diffusers
    ``SpectrogramDiffusionPipeline`` and optionally save it.

    NOTE(review): this cannot run as written — the body reads ``args``,
    ``synth_model``, ``ta_checkpoint`` and ``pipe`` while every assignment
    targets the throwaway ``__lowercase`` (the parameter was presumably
    ``args``), and it calls ``load_notes_encoder``/``load_continuous_encoder``
    /``load_decoder``, the pre-obfuscation names of the three ``_A`` helpers
    above.  Restore the original local names from the upstream script.
    """
    __lowercase =checkpoints.load_tax_checkpoint(args.checkpoint_path )
    __lowercase =jnp.tree_util.tree_map(onp.array , _lowerCAmelCase )
    # gin overrides applied on top of the training config.
    __lowercase =[
        'from __gin__ import dynamic_registration',
        'from music_spectrogram_diffusion.models.diffusion import diffusion_utils',
        'diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0',
        'diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()',
    ]
    __lowercase =os.path.join(args.checkpoint_path , '..' , 'config.gin' )
    __lowercase =inference.parse_training_gin_file(_lowerCAmelCase , _lowerCAmelCase )
    __lowercase =inference.InferenceModel(args.checkpoint_path , _lowerCAmelCase )
    __lowercase =DDPMScheduler(beta_schedule='squaredcos_cap_v2' , variance_type='fixed_large' )
    # The three sub-networks are sized from the T5X model config.
    __lowercase =SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length['inputs'] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
    __lowercase =SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['targets_context'] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
    __lowercase =TaFilmDecoder(
        input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['targets_context'] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
    __lowercase =load_notes_encoder(ta_checkpoint['target']['token_encoder'] , _lowerCAmelCase )
    __lowercase =load_continuous_encoder(ta_checkpoint['target']['continuous_encoder'] , _lowerCAmelCase )
    __lowercase =load_decoder(ta_checkpoint['target']['decoder'] , _lowerCAmelCase )
    # MelGAN vocoder fetched from the Hub as an ONNX runtime model.
    __lowercase =OnnxRuntimeModel.from_pretrained('kashif/soundstream_mel_decoder' )
    __lowercase =SpectrogramDiffusionPipeline(
        notes_encoder=_lowerCAmelCase , continuous_encoder=_lowerCAmelCase , decoder=_lowerCAmelCase , scheduler=_lowerCAmelCase , melgan=_lowerCAmelCase , )
    if args.save:
        pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
parser.add_argument("""--output_path""", default=None, type=str, required=True, help="""Path to the converted model.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument(
"""--checkpoint_path""",
default=f"{MODEL}/checkpoint_500000",
type=str,
required=False,
help="""Path to the original jax model checkpoint.""",
)
lowerCamelCase = parser.parse_args()
main(args)
| 48 |
'''simple docstring'''
from __future__ import annotations
import requests
def _A(story_id):
    """Fetch a single Hacker News item as a dict.

    The parameter name is restored from the f-string read (the obfuscated
    body interpolated an undefined ``story_id``).  A request timeout is added
    so a stalled API call cannot hang the script forever.
    """
    url = f"""https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"""
    return requests.get(url, timeout=10).json()


# Pre-obfuscation alias: the top-stories helper below calls this name.
get_hackernews_story = _A
def _A(max_stories=10):
    """Return dicts for the top ``max_stories`` Hacker News stories.

    Fixes from the obfuscated original: the parameter was unnamed-for-use
    (the body read an undefined ``max_stories``) and the integer parameter
    itself — not the URL — was passed to ``requests.get``.
    """
    url = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'
    story_ids = requests.get(url, timeout=10).json()[:max_stories]
    # ``get_hackernews_story`` is the pre-obfuscation name of the item
    # fetcher defined above (renamed to ``_A`` by obfuscation).
    return [get_hackernews_story(story_id) for story_id in story_ids]


# Pre-obfuscation alias: the markdown builder below calls this name.
hackernews_top_stories = _A
def _A(max_stories=10):
    """Render the top Hacker News stories as a markdown bullet list.

    Fixes from the obfuscated original: the body read an undefined
    ``max_stories``, and the format call expanded the integer parameter
    (``.format(**max_stories)``) instead of each story dict.
    """
    stories = hackernews_top_stories(max_stories)
    return "\n".join('* [{title}]({url})'.format(**story) for story in stories)


# Pre-obfuscation alias: the __main__ guard below calls this name.
hackernews_top_stories_as_markdown = _A
if __name__ == "__main__":
    # NOTE(review): ``hackernews_top_stories_as_markdown`` is never defined in
    # this file — obfuscation renamed every function to ``_A`` — so this guard
    # raises NameError as written.
    print(hackernews_top_stories_as_markdown())
| 48 | 1 |
"""simple docstring"""
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def _lowerCAmelCase ( UpperCamelCase_=None , UpperCamelCase_=None ):
return field(default_factory=lambda: default , metadata=UpperCamelCase_ )
@dataclass
class SCREAMING_SNAKE_CASE_:
    """Benchmark-plot CLI arguments, consumed by ``HfArgumentParser``.

    NOTE(review): the obfuscated original declared every field under the one
    name ``__lowercase`` (collapsing the dataclass to a single attribute) with
    an undefined ``__a`` default.  Field names are reconstructed from the
    attribute reads in the plotting class below (``args.csv_file``,
    ``args.plot_along_batch``, ...); defaults assumed ``False``/``None`` —
    TODO confirm against upstream.
    """

    csv_file: str = field(
        metadata={'help': 'The csv file to plot.'},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={'help': 'Whether to plot along batch size or sequence length. Defaults to sequence length.'},
    )
    is_time: bool = field(
        default=False,
        metadata={'help': 'Whether the csv file has time results or memory results. Defaults to memory results.'},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={'help': 'Disable logarithmic scale when plotting'},
    )
    is_train: bool = field(
        default=False,
        metadata={
            'help': 'Whether the csv file has training results or inference results. Defaults to inference results.'
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={'help': 'Filename under which the plot will be saved. If unused no plot is saved.'},
    )
    short_model_names: Optional[List[str]] = field(
        default=None,
        metadata={'help': 'List of model names that are used instead of the ones in the csv file.'},
    )
def _lowerCAmelCase ( UpperCamelCase_ ):
try:
int(UpperCamelCase_ )
return True
except ValueError:
return False
def _lowerCAmelCase ( UpperCamelCase_ ):
try:
float(UpperCamelCase_ )
return True
except ValueError:
return False
class SCREAMING_SNAKE_CASE_ :
    """Read benchmark results from a csv file and plot them with matplotlib
    (originally named ``Plot``).

    NOTE(review): this class cannot run as written — every local/attribute
    assignment targets the throwaway ``__SCREAMING_SNAKE_CASE`` while later
    lines read the original names (``self.args``, ``self.result_dict``,
    ``reader``, ``model_name``, ``ax``, ``title_str``, ``batch_sizes``, ...),
    and it calls ``can_convert_to_int``/``can_convert_to_float``, the
    pre-obfuscation names of the two ``_lowerCAmelCase`` helpers above.
    """
    def __init__( self , lowerCAmelCase__):
        __SCREAMING_SNAKE_CASE = args
        __SCREAMING_SNAKE_CASE = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})
        with open(self.args.csv_file , newline="""""") as csv_file:
            __SCREAMING_SNAKE_CASE = csv.DictReader(lowerCAmelCase__)
            for row in reader:
                __SCREAMING_SNAKE_CASE = row["""model"""]
                self.result_dict[model_name]["bsz"].append(int(row["""batch_size"""]))
                self.result_dict[model_name]["seq_len"].append(int(row["""sequence_length"""]))
                if can_convert_to_int(row["""result"""]):
                    # value is not None
                    __SCREAMING_SNAKE_CASE = int(row["""result"""])
                elif can_convert_to_float(row["""result"""]):
                    # value is not None
                    __SCREAMING_SNAKE_CASE = float(row["""result"""])
    def snake_case_ ( self):
        # Render one scatter/line series per (model, inner-loop value) pair,
        # along batch size or sequence length depending on the CLI flags.
        __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = plt.subplots()
        __SCREAMING_SNAKE_CASE = """Time usage""" if self.args.is_time else """Memory usage"""
        __SCREAMING_SNAKE_CASE = title_str + """ for training""" if self.args.is_train else title_str + """ for inference"""
        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("""log""")
            ax.set_yscale("""log""")
            for axis in [ax.xaxis, ax.yaxis]:
                axis.set_major_formatter(ScalarFormatter())
        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            __SCREAMING_SNAKE_CASE = sorted(set(self.result_dict[model_name]["""bsz"""]))
            __SCREAMING_SNAKE_CASE = sorted(set(self.result_dict[model_name]["""seq_len"""]))
            __SCREAMING_SNAKE_CASE = self.result_dict[model_name]["""result"""]
            ((__SCREAMING_SNAKE_CASE) ,(__SCREAMING_SNAKE_CASE)) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )
            __SCREAMING_SNAKE_CASE = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )
            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    __SCREAMING_SNAKE_CASE = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=lowerCAmelCase__ , )
                else:
                    __SCREAMING_SNAKE_CASE = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , )
                ((__SCREAMING_SNAKE_CASE) ,(__SCREAMING_SNAKE_CASE)) = (
                    ("""batch_size""", """len""") if self.args.plot_along_batch else ("""in #tokens""", """bsz""")
                )
                __SCREAMING_SNAKE_CASE = np.asarray(lowerCAmelCase__ , lowerCAmelCase__)[: len(lowerCAmelCase__)]
                plt.scatter(
                    lowerCAmelCase__ , lowerCAmelCase__ , label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}")
                plt.plot(lowerCAmelCase__ , lowerCAmelCase__ , """--""")
            title_str += f" {label_model_name} vs."
        __SCREAMING_SNAKE_CASE = title_str[:-4]
        __SCREAMING_SNAKE_CASE = """Time in s""" if self.args.is_time else """Memory in MB"""
        # plot
        plt.title(lowerCAmelCase__)
        plt.xlabel(lowerCAmelCase__)
        plt.ylabel(lowerCAmelCase__)
        plt.legend()
        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()
def _lowerCAmelCase ( ):
    """CLI entry point: parse the plot arguments and render the plot.

    NOTE(review): as obfuscated this cannot run — ``HfArgumentParser`` is
    given the undefined ``UpperCamelCase_`` (it should receive the arguments
    dataclass), results are bound to a throwaway name while the next lines
    read ``parser``/``plot``, and ``Plot`` is undefined because both classes
    in this file were renamed to ``SCREAMING_SNAKE_CASE_``.
    """
    __SCREAMING_SNAKE_CASE = HfArgumentParser(UpperCamelCase_ )
    __SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses()[0]
    __SCREAMING_SNAKE_CASE = Plot(args=UpperCamelCase_ )
    plot.plot()
if __name__ == "__main__":
    # ``main`` was never defined; obfuscation renamed it to ``_lowerCAmelCase``
    # (the final binding of that name in this module is the entry point).
    _lowerCAmelCase()
| 100 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


# Submodule -> public symbols, consumed lazily by ``_LazyModule`` below.
# NOTE(review): the obfuscated original bound this dict and each backend's
# symbol list to a throwaway ``a_`` while the tail referenced
# ``_import_structure`` and never installed the lazy module; the symbol and
# module names are restored from the string lists themselves
# ("LongT5Config", "configuration_longt5", ...).
_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )

else:
    import sys

    # Replace this module with the lazy loader so attribute access triggers
    # the on-demand imports declared in ``_import_structure``.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 277 | 0 |
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class SCREAMING_SNAKE_CASE :
    """Lightweight view over a deepspeed config (a dict, a path to a json
    file, or a base64-encoded json string) offering dotted-path lookups and
    ZeRO stage / offload introspection.

    NOTE(review): every method of the obfuscated original shared the single
    name ``SCREAMING_SNAKE_CASE`` (so only the last binding survived) while
    the bodies called ``set_stage_and_offload``/``find_config_node``/
    ``get_value``; method, parameter and attribute names below are restored
    from those in-class reads.  ``is_zero2``/``is_zero3`` are inferred from
    the ``_stage == 2`` / ``_stage == 3`` bodies — confirm against upstream.
    """

    def __init__( self , config_file_or_dict ):
        if isinstance(config_file_or_dict , dict ):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict )
        elif os.path.exists(config_file_or_dict ):
            with io.open(config_file_or_dict , """r""" , encoding="""utf-8""" ) as f:
                config = json.load(f )
        else:
            try:
                # Local import: the module header's ``baseaa`` alias is a
                # broken obfuscation of ``base64``.
                import base64

                config_decoded = base64.urlsafe_b64decode(config_file_or_dict ).decode("""utf-8""" )
                config = json.loads(config_decoded )
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f'''Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}''' )
        self.config = config
        self.set_stage_and_offload()

    def set_stage_and_offload( self ):
        # zero stage
        self._stage = self.get_value("""zero_optimization.stage""" , -1 )
        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["""cpu""", """nvme"""] )
            offload_devices = set(
                [
                    self.get_value("""zero_optimization.offload_optimizer.device""" ),
                    self.get_value("""zero_optimization.offload_param.device""" ),
                ] )
            if len(offload_devices & offload_devices_valid ) > 0:
                self._offload = True

    def find_config_node( self , ds_key_long ):
        config = self.config
        # find the config node of interest if it exists
        nodes = ds_key_long.split(""".""" )
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node )
            if config is None:
                return None, ds_key
        return config, ds_key

    def get_value( self , ds_key_long , default=None ):
        config, ds_key = self.find_config_node(ds_key_long )
        if config is None:
            return default
        return config.get(ds_key , default )

    def del_config_sub_tree( self , ds_key_long , must_exist=False ):
        config = self.config
        # find the config node of interest if it exists
        nodes = ds_key_long.split(""".""" )
        for node in nodes:
            parent_config = config
            config = config.get(node )
            if config is None:
                if must_exist:
                    raise ValueError(f'''Can\'t find {ds_key_long} entry in the config: {self.config}''' )
                else:
                    return
        # if found remove it
        if parent_config is not None:
            parent_config.pop(node )

    def is_true( self , ds_key_long ):
        value = self.get_value(ds_key_long )
        return False if value is None else bool(value )

    def is_false( self , ds_key_long ):
        value = self.get_value(ds_key_long )
        return False if value is None else not bool(value )

    def is_zero2( self ):
        return self._stage == 2

    def is_zero3( self ):
        return self._stage == 3

    def is_offload( self ):
        return self._offload
class SCREAMING_SNAKE_CASE :
    """Internal wrapper around a deepspeed engine: its ``backward`` both
    back-propagates and steps, because deepspeed fuses those phases.

    Fixes from the obfuscated original: the engine was bound to a local in
    ``__init__`` (so ``self.engine`` reads always failed), and the backward
    method declared its positional and ``**`` parameters under one name
    (a SyntaxError); names restored from the body's reads.
    """

    def __init__( self , lowerCAmelCase ):
        self.engine = lowerCAmelCase

    def SCREAMING_SNAKE_CASE ( self , loss , **kwargs ):
        """Run the fused deepspeed backward + step."""
        self.engine.backward(loss , **kwargs )

        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
        # and this plugin overrides the above calls with no-ops when Accelerate runs under
        # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
        # training loop that works transparently under many training regimes.
class SCREAMING_SNAKE_CASE ( AcceleratedOptimizer ):
    """Optimizer wrapper used when deepspeed performs the actual step.

    NOTE(review): the obfuscated base class was the undefined ``a_``;
    ``AcceleratedOptimizer`` (imported at the top of this module) is the
    intended target.  The three no-op methods below all share one obfuscated
    name, so only the last binding survives on the class; their original
    names must be restored from upstream.
    """

    def __init__( self , lowerCAmelCase ):
        # NOTE(review): the original forwarded the optimizer as all three
        # arguments; device placement and scaling are handled by deepspeed
        # itself — confirm the defaults against upstream.
        super().__init__(lowerCAmelCase , device_placement=False , scaler=None )
        # Read back by the overflow property below.
        self.__has_overflow__ = hasattr(self.optimizer , """overflow""" )

    def SCREAMING_SNAKE_CASE ( self , lowerCAmelCase=None ):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def SCREAMING_SNAKE_CASE ( self ):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def SCREAMING_SNAKE_CASE ( self ):
        """Whether the last step overflowed (deepspeed-managed optimizers only)."""
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False
class SCREAMING_SNAKE_CASE ( AcceleratedScheduler ):
    """Scheduler wrapper used when deepspeed drives the LR schedule.

    NOTE(review): the obfuscated base class was the undefined ``a_``;
    ``AcceleratedScheduler`` (imported at the top of this module) is the
    intended target.  The obfuscated ``__init__`` declared both parameters
    under one name (a SyntaxError); restored as ``(scheduler, optimizers)``
    to match the base-class signature — confirm against upstream.
    """

    def __init__( self , scheduler , optimizers ):
        super().__init__(scheduler , optimizers )

    def SCREAMING_SNAKE_CASE ( self ):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class SCREAMING_SNAKE_CASE :
    """Placeholder optimizer that only records its constructor arguments, for
    use when the real optimizer comes from the deepspeed config.

    Fixes from the obfuscated original: the signature declared every
    parameter under one name (a SyntaxError) and bound the values to locals;
    parameter names are restored from the body's reads and stored on ``self``
    so downstream code can read them back.
    """

    def __init__( self , params , lr=0.001 , weight_decay=0 , **kwargs ):
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs
class SCREAMING_SNAKE_CASE :
    """Placeholder scheduler that only records its constructor arguments, for
    use when the real scheduler comes from the deepspeed config.

    Fixes from the obfuscated original: the signature declared multiple
    parameters under one name (a SyntaxError) and bound the values to locals;
    parameter names are restored from the body's reads and stored on ``self``
    so downstream code can read them back.
    """

    def __init__( self , optimizer , total_num_steps=None , warmup_num_steps=0 , **kwargs ):
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
| 353 |
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE ( a_ ):
    """Unit tests for ``EulerDiscreteScheduler`` (base originally
    ``SchedulerCommonTest``, imported above).

    NOTE(review): this class cannot run as written — the base ``a_`` is
    undefined, both class attributes share one obfuscated name (only the
    second binding survives), all test methods share one name, and every
    local is bound to ``__lowerCAmelCase`` while later lines read the
    original names (``config``, ``scheduler_class``, ``scheduler``,
    ``sample``, ``model``, ``output``, ``result_sum``, ``result_mean``).
    Reference sums/means below pin the expected numerics once restored.
    """
    lowerCamelCase : Dict =(EulerDiscreteScheduler,)
    lowerCamelCase : Dict =10
    def SCREAMING_SNAKE_CASE ( self : Optional[int] , **lowerCAmelCase : Union[str, Any] ) -> List[str]:
        """simple docstring"""
        __lowerCAmelCase : Dict = {
            """num_train_timesteps""": 11_00,
            """beta_start""": 0.0001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
        }
        config.update(**lowerCAmelCase )
        return config
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
        """simple docstring"""
        for timesteps in [10, 50, 1_00, 10_00]:
            self.check_over_configs(num_train_timesteps=lowerCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
        """simple docstring"""
        for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=lowerCAmelCase , beta_end=lowerCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
        """simple docstring"""
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=lowerCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
        """simple docstring"""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=lowerCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
        """simple docstring"""
        __lowerCAmelCase : Any = self.scheduler_classes[0]
        __lowerCAmelCase : int = self.get_scheduler_config()
        __lowerCAmelCase : Any = scheduler_class(**lowerCAmelCase )
        scheduler.set_timesteps(self.num_inference_steps )
        __lowerCAmelCase : str = torch.manual_seed(0 )
        __lowerCAmelCase : List[Any] = self.dummy_model()
        __lowerCAmelCase : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma
        __lowerCAmelCase : int = sample.to(lowerCAmelCase )
        for i, t in enumerate(scheduler.timesteps ):
            __lowerCAmelCase : Tuple = scheduler.scale_model_input(lowerCAmelCase , lowerCAmelCase )
            __lowerCAmelCase : Dict = model(lowerCAmelCase , lowerCAmelCase )
            __lowerCAmelCase : str = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , generator=lowerCAmelCase )
            __lowerCAmelCase : Optional[int] = output.prev_sample
        __lowerCAmelCase : str = torch.sum(torch.abs(lowerCAmelCase ) )
        __lowerCAmelCase : Optional[int] = torch.mean(torch.abs(lowerCAmelCase ) )
        assert abs(result_sum.item() - 10.0807 ) < 1e-2
        assert abs(result_mean.item() - 0.0131 ) < 1e-3
    def SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
        """simple docstring"""
        __lowerCAmelCase : List[Any] = self.scheduler_classes[0]
        __lowerCAmelCase : List[Any] = self.get_scheduler_config(prediction_type="""v_prediction""" )
        __lowerCAmelCase : List[str] = scheduler_class(**lowerCAmelCase )
        scheduler.set_timesteps(self.num_inference_steps )
        __lowerCAmelCase : Any = torch.manual_seed(0 )
        __lowerCAmelCase : int = self.dummy_model()
        __lowerCAmelCase : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
        __lowerCAmelCase : List[Any] = sample.to(lowerCAmelCase )
        for i, t in enumerate(scheduler.timesteps ):
            __lowerCAmelCase : Any = scheduler.scale_model_input(lowerCAmelCase , lowerCAmelCase )
            __lowerCAmelCase : Any = model(lowerCAmelCase , lowerCAmelCase )
            __lowerCAmelCase : Dict = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , generator=lowerCAmelCase )
            __lowerCAmelCase : Dict = output.prev_sample
        __lowerCAmelCase : List[str] = torch.sum(torch.abs(lowerCAmelCase ) )
        __lowerCAmelCase : Optional[int] = torch.mean(torch.abs(lowerCAmelCase ) )
        assert abs(result_sum.item() - 0.0002 ) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06 ) < 1e-3
    def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
        """simple docstring"""
        __lowerCAmelCase : List[Any] = self.scheduler_classes[0]
        __lowerCAmelCase : Dict = self.get_scheduler_config()
        __lowerCAmelCase : Optional[int] = scheduler_class(**lowerCAmelCase )
        scheduler.set_timesteps(self.num_inference_steps , device=lowerCAmelCase )
        __lowerCAmelCase : Union[str, Any] = torch.manual_seed(0 )
        __lowerCAmelCase : Dict = self.dummy_model()
        __lowerCAmelCase : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        __lowerCAmelCase : Dict = sample.to(lowerCAmelCase )
        for t in scheduler.timesteps:
            __lowerCAmelCase : Union[str, Any] = scheduler.scale_model_input(lowerCAmelCase , lowerCAmelCase )
            __lowerCAmelCase : List[str] = model(lowerCAmelCase , lowerCAmelCase )
            __lowerCAmelCase : int = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , generator=lowerCAmelCase )
            __lowerCAmelCase : Any = output.prev_sample
        __lowerCAmelCase : int = torch.sum(torch.abs(lowerCAmelCase ) )
        __lowerCAmelCase : str = torch.mean(torch.abs(lowerCAmelCase ) )
        assert abs(result_sum.item() - 10.0807 ) < 1e-2
        assert abs(result_mean.item() - 0.0131 ) < 1e-3
    def SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
        """simple docstring"""
        __lowerCAmelCase : Optional[int] = self.scheduler_classes[0]
        __lowerCAmelCase : Optional[int] = self.get_scheduler_config()
        __lowerCAmelCase : List[Any] = scheduler_class(**lowerCAmelCase , use_karras_sigmas=lowerCAmelCase )
        scheduler.set_timesteps(self.num_inference_steps , device=lowerCAmelCase )
        __lowerCAmelCase : str = torch.manual_seed(0 )
        __lowerCAmelCase : str = self.dummy_model()
        __lowerCAmelCase : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        __lowerCAmelCase : int = sample.to(lowerCAmelCase )
        for t in scheduler.timesteps:
            __lowerCAmelCase : int = scheduler.scale_model_input(lowerCAmelCase , lowerCAmelCase )
            __lowerCAmelCase : Dict = model(lowerCAmelCase , lowerCAmelCase )
            __lowerCAmelCase : Tuple = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , generator=lowerCAmelCase )
            __lowerCAmelCase : List[Any] = output.prev_sample
        __lowerCAmelCase : Tuple = torch.sum(torch.abs(lowerCAmelCase ) )
        __lowerCAmelCase : Any = torch.mean(torch.abs(lowerCAmelCase ) )
        assert abs(result_sum.item() - 124.52_2994_9951_1719 ) < 1e-2
        assert abs(result_mean.item() - 0.1_6213_9326_3339_9963 ) < 1e-3
| 139 | 0 |
'''simple docstring'''
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
lowercase : List[Any] = logging.getLogger()
def SCREAMING_SNAKE_CASE__ ( path , articles ):
    """Write *articles* (an iterable of strings) joined by newlines to *path*.

    Fixes from the obfuscated original: the signature declared both
    parameters under one name (a SyntaxError), the joined string was bound to
    a throwaway local while the raw iterable was written instead, and the
    handle from ``.open('w')`` was never closed (``write_text`` handles
    open/write/close in one call).
    """
    content = '\n'.join(articles )
    Path(path ).write_text(content )


# Pre-obfuscation alias: the test class below calls ``_dump_articles``.
_dump_articles = SCREAMING_SNAKE_CASE__
# Tiny Hub checkpoints used to keep the eval-script smoke tests fast.
# NOTE(review): all three constants share one obfuscated name (only the last
# assignment survives); the test class below reads the pre-obfuscation names
# T5_TINY / BART_TINY / MBART_TINY, which are undefined here.
lowercase : List[Any] = "patrickvonplaten/t5-tiny-random"
lowercase : Optional[int] = "sshleifer/bart-tiny-random"
lowercase : Dict = "sshleifer/tiny-mbart"
# NOTE(review): the handler is bound to ``lowercase`` but registered via the
# undefined ``stream_handler`` on the next line.
lowercase : List[Any] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL)  # remove noisy download output from tracebacks
class __UpperCAmelCase ( _lowerCamelCase ):
    """End-to-end smoke tests for ``run_eval`` / ``run_eval_search``
    (base originally ``TestCasePlus``, imported above).

    NOTE(review): this class cannot run as written — every method shares the
    obfuscated name ``lowerCamelCase`` (only the last binding survives), each
    local is bound to ``_snake_case`` while later lines read the original
    names (``input_file_name``, ``output_file_name``, ``score_path``,
    ``task``, ``text``, ``tmp_dir``, ``testargs``, ``expected_strings``,
    ``un_expected_strings``), ``self.run_eval_tester`` is undefined, and the
    decorators reference the undefined T5_TINY/BART_TINY/MBART_TINY.
    """
    def lowerCamelCase ( self , lowerCAmelCase_ ):
        """simple docstring"""
        _snake_case = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
        _snake_case = input_file_name.parent / 'utest_output.txt'
        assert not output_file_name.exists()
        _snake_case = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.']
        _dump_articles(lowerCAmelCase_ , lowerCAmelCase_ )
        _snake_case = str(Path(self.get_auto_remove_tmp_dir() ) / 'scores.json' )
        _snake_case = 'translation_en_to_de' if model == T5_TINY else 'summarization'
        _snake_case = F'\n  run_eval_search.py\n  {model}\n  {input_file_name}\n  {output_file_name}\n  --score_path {score_path}\n  --task {task}\n  --num_beams 2\n  --length_penalty 2.0\n  '.split()
        with patch.object(lowerCAmelCase_ , 'argv' , lowerCAmelCase_ ):
            run_generate()
        assert Path(lowerCAmelCase_ ).exists()
        # os.remove(Path(output_file_name))
    def lowerCamelCase ( self ):
        """simple docstring"""
        self.run_eval_tester(lowerCAmelCase_ )
    @parameterized.expand([BART_TINY, MBART_TINY] )
    @slow
    def lowerCamelCase ( self , lowerCAmelCase_ ):
        """simple docstring"""
        self.run_eval_tester(lowerCAmelCase_ )
    @parameterized.expand([T5_TINY, MBART_TINY] )
    @slow
    def lowerCamelCase ( self , lowerCAmelCase_ ):
        """simple docstring"""
        _snake_case = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
        _snake_case = input_file_name.parent / 'utest_output.txt'
        assert not output_file_name.exists()
        _snake_case = {
            'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'],
            'de': [
                'Maschinelles Lernen ist großartig, oder?',
                'Ich esse gerne Bananen',
                'Morgen ist wieder ein toller Tag!',
            ],
        }
        _snake_case = Path(self.get_auto_remove_tmp_dir() )
        _snake_case = str(tmp_dir / 'scores.json' )
        _snake_case = str(tmp_dir / 'val.target' )
        _dump_articles(lowerCAmelCase_ , text['en'] )
        _dump_articles(lowerCAmelCase_ , text['de'] )
        _snake_case = 'translation_en_to_de' if model == T5_TINY else 'summarization'
        _snake_case = F'\n  run_eval_search.py\n  {model}\n  {str(lowerCAmelCase_ )}\n  {str(lowerCAmelCase_ )}\n  --score_path {score_path}\n  --reference_path {reference_path}\n  --task {task}\n  '.split()
        testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'] )
        with patch.object(lowerCAmelCase_ , 'argv' , lowerCAmelCase_ ):
            with CaptureStdout() as cs:
                run_search()
        _snake_case = [' num_beams | length_penalty', model, 'Best score args']
        _snake_case = ['Info']
        if "translation" in task:
            expected_strings.append('bleu' )
        else:
            expected_strings.extend(lowerCAmelCase_ )
        for w in expected_strings:
            assert w in cs.out
        for w in un_expected_strings:
            assert w not in cs.out
        assert Path(lowerCAmelCase_ ).exists()
        os.remove(Path(lowerCAmelCase_ ) )
| 42 |
def __UpperCamelCase ( s , pattern ):
    """Naive O(len(s) * len(pattern)) substring search.

    Returns every index in *s* at which *pattern* occurs.  Fixes vs. the
    previous version: the two parameters shared one name (a SyntaxError),
    the body referenced `s`/`pattern`/`pat_len`/`match_found` that were never
    bound, and the string argument (not the match index) was appended.
    """
    pat_len = len(pattern)
    position = []
    # slide the pattern over every viable start index
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
    # Quick self-check; the search function in this file is named
    # __UpperCamelCase — the previous `naive_pattern_search` was a NameError.
    assert __UpperCamelCase('ABCDEFG', 'DE') == [3]
    print(__UpperCamelCase('ABAAABCDBBABCDDEBCABC', 'ABC'))
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class a ( DiffusionPipeline ):
    """Unconditional image generation pipeline for score-based generative
    modeling through SDEs (Song et al.), driven by a ScoreSdeVeScheduler.

    Fixes vs. the previous version: the base class `a__` did not exist
    (DiffusionPipeline is the imported base), both __init__ parameters shared
    one name, every local was clobbered into `_A`, and the
    (prev_sample, prev_sample_mean) pair was assigned to a single name.
    """

    # components registered via register_modules
    unet: UNetaDModel
    scheduler: ScoreSdeVeScheduler

    def __init__( self , unet , scheduler ) -> None:
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__( self , batch_size = 1 , num_inference_steps = 20_00 , generator = None , output_type = "pil" , return_dict = True , **kwargs , ) -> Union[ImagePipelineOutput, Tuple]:
        """Sample *batch_size* images by running the predictor/corrector loop."""
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        # start from pure noise scaled to the scheduler's initial sigma
        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)
        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)
            # correction step (Langevin dynamics)
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample
            # prediction step (reverse SDE)
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)
            sample, sample_mean = output.prev_sample, output.prev_sample_mean
        # report the denoised mean of the final step, clamped to image range
        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Lazy-import table for the ViT-MSN model.  The previous version assigned the
# table and the modeling list to an unrelated placeholder name, so
# `_import_structure` was undefined and the _LazyModule was never installed.
_import_structure = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_vit_msn'] = [
        'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ViTMSNModel',
        'ViTMSNForImageClassification',
        'ViTMSNPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )
else:
    import sys

    # replace this module with a lazy proxy so heavy deps load on first access
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 81 | 0 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def a ( *lowerCamelCase_ ):
    """Print under an exclusive flock on this file so output from multiple
    distributed processes does not interleave.

    Fixes vs. the previous version: the lock file and the print arguments
    referenced an undefined placeholder name; the lock target is this script
    itself (``__file__``) and the printed values are the varargs.
    """
    with open(__file__, 'r') as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*lowerCamelCase_)
        finally:
            # always release the lock, even if print raises
            fcntl.flock(fh, fcntl.LOCK_UN)
# Per-process body of the diagnostic: bind this process to its GPU, then check
# that NCCL collectives and CUDA allocation work.  The previous version bound
# every value to the placeholder `A__`, so local_rank/device/hostname/gpu/
# rank/world_size were all NameErrors.
local_rank = int(os.environ['LOCAL_RANK'])
torch.cuda.set_device(local_rank)
device = torch.device('cuda', local_rank)
hostname = socket.gethostname()
gpu = F"[{hostname}-{local_rank}]"

try:
    # test distributed
    dist.init_process_group('nccl')
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()

    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)

    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    printflock(F"{gpu} is OK (global rank: {rank}/{world_size})")
    dist.barrier()
    if rank == 0:
        printflock(F"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")
except Exception:
    printflock(F"{gpu} is broken")
    raise
| 207 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def _a ( SCREAMING_SNAKE_CASE_ : dict , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : set , SCREAMING_SNAKE_CASE_ : set , SCREAMING_SNAKE_CASE_ : dict , SCREAMING_SNAKE_CASE_ : dict , SCREAMING_SNAKE_CASE_ : PriorityQueue , SCREAMING_SNAKE_CASE_ : dict , SCREAMING_SNAKE_CASE_ : float | int , ):
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
__lowerCAmelCase = cst_fwd.get(SCREAMING_SNAKE_CASE_ , np.inf )
__lowerCAmelCase = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
__lowerCAmelCase = new_cost_f
__lowerCAmelCase = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
__lowerCAmelCase = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def _a ( source : str , destination : str , graph_forward : dict , graph_backward : dict ):
    """Bidirectional Dijkstra: shortest distance from *source* to *destination*.

    Returns -1 when the destination is unreachable.  Fixes vs. the previous
    version: every local binding (cost maps, parent maps, queues, unpacked
    frontier vertices) was lost to a single placeholder name.

    NOTE(review): relies on the module-level relaxation helper being callable
    as ``pass_and_relaxation`` — confirm that helper's public name.
    """
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward = PriorityQueue()
    queue_backward = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source))
    queue_backward.put((0, destination))
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)
        # relax one frontier vertex on each side per iteration
        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward, cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance, )
        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward, cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance, )
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            # the frontiers can no longer improve the meeting distance
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
# Forward adjacency list: node -> [[neighbour, edge_weight], ...].
# NOTE(review): both graphs are bound to the same name `UpperCamelCase__`, so
# the second assignment shadows the first — the intended distinct names
# (e.g. graph_fwd / graph_bwd) were lost; confirm and restore.
UpperCamelCase__ = {
    """B""": [["""C""", 1]],
    """C""": [["""D""", 1]],
    """D""": [["""F""", 1]],
    """E""": [["""B""", 1], ["""G""", 2]],
    """F""": [],
    """G""": [["""F""", 1]],
}
# Backward (reversed-edge) adjacency list for the bidirectional search.
UpperCamelCase__ = {
    """B""": [["""E""", 1]],
    """C""": [["""B""", 1]],
    """D""": [["""C""", 1]],
    """F""": [["""D""", 1], ["""G""", 1]],
    """E""": [[None, np.inf]],
    """G""": [["""E""", 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 92 | 0 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class UpperCamelCase__ :
    """A single node of a circular singly linked list.

    Fixes vs. the previous version: the constructor bound its value to a
    placeholder local, so ``self.data`` and ``self.next`` (which the list
    class reads) were never set.
    """

    def __init__( self , _A ) -> None:
        self.data = _A  # stored payload
        self.next = None  # successor node; set when linked into the ring
class UpperCamelCase__ :
    """A circular singly linked list: ``tail.next`` always points back at ``head``.

    Fixes vs. the previous version: ``self.head``/``self.tail`` were never
    assigned, every mutating method shared the name ``_UpperCamelCase`` (each
    definition shadowing the last), ``__repr__`` stringified an undefined
    name, and all pointer rewires were lost to placeholder locals.  Method
    names now match what the module-level exercise function calls
    (insert_nth, delete_front, ...).
    """

    def __init__( self ) -> None:
        self.head = None  # first node, or None when the list is empty
        self.tail = None  # last node; tail.next is head

    def __iter__( self ) -> Iterator[Any]:
        """Yield each node's data exactly once, walking the ring from head."""
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__( self ) -> int:
        return sum(1 for _ in self)

    def __repr__( self ) -> str:
        return "->".join(str(item) for item in iter(self))

    def insert_tail( self , data ) -> None:
        """Append *data* after the current tail."""
        self.insert_nth(len(self), data)

    def insert_head( self , data ) -> None:
        """Prepend *data* before the current head."""
        self.insert_nth(0, data)

    def insert_nth( self , index , data ) -> None:
        """Insert *data* at position *index* (0..len inclusive)."""
        if index < 0 or index > len(self):
            raise IndexError('list index out of range.')
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.head = self.tail = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front( self ):
        """Remove and return the head's data."""
        return self.delete_nth(0)

    def delete_tail( self ):
        """Remove and return the tail's data."""
        return self.delete_nth(len(self) - 1)

    def delete_nth( self , index = 0 ):
        """Remove and return the data at position *index* (0..len-1)."""
        if not 0 <= index < len(self):
            raise IndexError('list index out of range.')
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.head.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty( self ) -> bool:
        return len(self) == 0
def A__ ( ):
    """Exercise CircularLinkedList: emptiness, out-of-range deletes, inserts
    at head/tail/middle, and deletes at every position.

    Fixes vs. the previous version: the list object and loop variables were
    bound to placeholder names, so nearly every statement referenced an
    undefined name, and ``insert_nth`` received the wrong index argument.
    """
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
    # Run any doctests in this module when executed as a script.
    import doctest

    doctest.testmod()
| 363 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def A__ ( __lowerCamelCase ):
    """Build a BitConfig for the timm model named *__lowerCamelCase*, with
    ImageNet-1k label maps pulled from the hub.

    Fixes vs. the previous version: the label dicts and config were bound to
    a placeholder name (so later references failed), the model-name check
    referenced an undefined ``model_name``, and the id2label/label2id kwargs
    had garbled names.
    """
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}  # JSON keys arrive as strings
    label2id = {v: k for k, v in id2label.items()}
    conv_layer = 'std_conv' if 'bit' in __lowerCamelCase else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer, num_labels=10_00, id2label=id2label, label2id=label2id, )
    return config
def A__ ( __lowerCamelCase ):
    """Map a timm BiT state-dict key to the HF Transformers naming scheme.

    Fixes vs. the previous version: the body read and wrote ``name`` while
    the parameter was named ``__lowerCamelCase``, so every branch raised
    NameError and the rewrites were lost.
    """
    name = __lowerCamelCase
    if "stem.conv" in name:
        name = name.replace('stem.conv', 'bit.embedder.convolution')
    if "blocks" in name:
        name = name.replace('blocks', 'layers')
    if "head.fc" in name:
        name = name.replace('head.fc', 'classifier.1')
    if name.startswith('norm'):
        name = 'bit.' + name
    # everything not already under the bit.* / classifier.* prefixes lives in the encoder
    if "bit" not in name and "classifier" not in name:
        name = 'bit.encoder.' + name
    return name
def A__ ( ):
    """Download the standard COCO cats image used to sanity-check conversions.

    Fixes vs. the previous version: the URL was bound to a placeholder and
    ``requests.get`` was called with an undefined name instead of the URL
    and ``stream=True``.
    """
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=False ):
    """Convert a timm BiT checkpoint to HF format, verify its outputs against
    the original model, and optionally save/push the result.

    NOTE(review): all three parameters share one name (a SyntaxError) —
    presumably (model_name, pytorch_dump_folder_path, push_to_hub); and every
    local binding was collapsed into `SCREAMING_SNAKE_CASE_`, so the later
    references (timm_model, state_dict, model, transform, timm_transforms,
    pillow_resamplings, processor, outputs, logits, timm_logits, ...) are
    undefined.  The intended distinct names were lost; reconstruct before use.
    """
    SCREAMING_SNAKE_CASE_ = get_config(__lowerCamelCase )
    # load original model from timm
    SCREAMING_SNAKE_CASE_ = create_model(__lowerCamelCase, pretrained=__lowerCamelCase )
    timm_model.eval()
    # load state_dict of original model
    SCREAMING_SNAKE_CASE_ = timm_model.state_dict()
    for key in state_dict.copy().keys():
        SCREAMING_SNAKE_CASE_ = state_dict.pop(__lowerCamelCase )
        SCREAMING_SNAKE_CASE_ = val.squeeze() if '''head''' in key else val
    # load HuggingFace model
    SCREAMING_SNAKE_CASE_ = BitForImageClassification(__lowerCamelCase )
    model.eval()
    model.load_state_dict(__lowerCamelCase )
    # create image processor
    SCREAMING_SNAKE_CASE_ = create_transform(**resolve_data_config({}, model=__lowerCamelCase ) )
    SCREAMING_SNAKE_CASE_ = transform.transforms
    SCREAMING_SNAKE_CASE_ = {
        '''bilinear''': PILImageResampling.BILINEAR,
        '''bicubic''': PILImageResampling.BICUBIC,
        '''nearest''': PILImageResampling.NEAREST,
    }
    SCREAMING_SNAKE_CASE_ = BitImageProcessor(
        do_resize=__lowerCamelCase, size={'''shortest_edge''': timm_transforms[0].size}, resample=pillow_resamplings[timm_transforms[0].interpolation.value], do_center_crop=__lowerCamelCase, crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]}, do_normalize=__lowerCamelCase, image_mean=timm_transforms[-1].mean.tolist(), image_std=timm_transforms[-1].std.tolist(), )
    SCREAMING_SNAKE_CASE_ = prepare_img()
    SCREAMING_SNAKE_CASE_ = transform(__lowerCamelCase ).unsqueeze(0 )
    SCREAMING_SNAKE_CASE_ = processor(__lowerCamelCase, return_tensors='''pt''' ).pixel_values
    # verify pixel values
    assert torch.allclose(__lowerCamelCase, __lowerCamelCase )
    # verify logits
    with torch.no_grad():
        SCREAMING_SNAKE_CASE_ = model(__lowerCamelCase )
    SCREAMING_SNAKE_CASE_ = outputs.logits
    print('''Logits:''', logits[0, :3] )
    print('''Predicted class:''', model.config.idalabel[logits.argmax(-1 ).item()] )
    SCREAMING_SNAKE_CASE_ = timm_model(__lowerCamelCase )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(__lowerCamelCase, outputs.logits, atol=1E-3 )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase )
        print(F'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' )
        model.save_pretrained(__lowerCamelCase )
        processor.save_pretrained(__lowerCamelCase )
    if push_to_hub:
        print(F'''Pushing model {model_name} and processor to the hub''' )
        model.push_to_hub(F'''ybelkada/{model_name}''' )
        processor.push_to_hub(F'''ybelkada/{model_name}''' )
if __name__ == "__main__":
    # CLI entry point.  The previous version bound the parser and the parsed
    # args to a placeholder name, so `parser` and `args` were NameErrors.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="resnetv2_50x1_bitm",
        type=str,
        help="Name of the BiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model to the hub.",
    )
    args = parser.parse_args()
    convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 257 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
a_ :Union[str, Any] = logging.get_logger(__name__)
a_ :int = torch.device("cpu")
def lowercase_ ():
    """Download the standard COCO cats image used to sanity-check conversions.

    Fixes vs. the previous version: the URL was bound to a placeholder and
    ``requests.get`` was called with an undefined name instead of the URL
    and ``stream=True``.
    """
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def lowercase_ (A):
    """Return the expected first five logits for the SwiftFormer variant
    named *A*; returns None for unknown names.

    Fixes vs. the previous version: the comparisons referenced an undefined
    ``swiftformer_name`` instead of the parameter, and the ``Optional`` /
    ``Any`` annotation names are not imported in this module (NameError at
    definition time), so the annotations were dropped.
    """
    if A == "swiftformer_xs":
        return torch.tensor([-2.1_703e00, 2.1_107e00, -2.0_811e00, 8.8_685e-01, 2.4_360e-01])
    elif A == "swiftformer_s":
        return torch.tensor([3.9_636e-01, 2.3_478e-01, -1.6_963e00, -1.7_381e00, -8.6_337e-01])
    elif A == "swiftformer_l1":
        return torch.tensor([-4.2_768e-01, -4.7_429e-01, -1.0_897e00, -1.0_248e00, 3.5_523e-02])
    elif A == "swiftformer_l3":
        return torch.tensor([-2.5_330e-01, 2.4_211e-01, -6.0_185e-01, -8.2_789e-01, -6.0_446e-02])
def lowercase_ (dct , old , new ):
    """Rename key *old* to *new* in *dct*, in place.

    Fixes vs. the previous version: the three parameters shared one name
    (a SyntaxError) and the popped value was bound to a placeholder instead
    of being stored under the new key.
    """
    val = dct.pop(old)
    dct[new] = val
def lowercase_ (A):
    """Build (old_key, new_key) pairs mapping an original SwiftFormer
    state dict onto the HF Transformers naming scheme.

    Fixes vs. the previous version: the loop iterated an undefined
    ``state_dict`` and the rewritten key / split parts were bound to
    placeholder names, so every reference failed.
    """
    rename_keys = []
    for k in A.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace('.pwconv', '.point_wise_conv')
        if ".dwconv" in k:
            k_new = k_new.replace('.dwconv', '.depth_wise_conv')
        if ".Proj." in k:
            k_new = k_new.replace('.Proj.', '.proj.')
        if "patch_embed" in k_new:
            k_new = k_new.replace('patch_embed', 'swiftformer.patch_embed.patch_embedding')
        if "network" in k_new:
            ls = k_new.split('.')
            if ls[2].isdigit():
                # stage blocks: network.<stage>.<block>.<rest> -> explicit blocks path
                k_new = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:])
            else:
                k_new = k_new.replace('network', 'swiftformer.encoder.network')
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def lowercase_ (A : str , A : Optional[int] , A : Optional[int] ):
    """Convert an original SwiftFormer checkpoint to HF format, verify the
    logits against known-good values, and save the result.

    NOTE(review): all three parameters share one name (a SyntaxError) —
    presumably (swiftformer_name, pytorch_dump_folder_path, original_ckpt);
    and every local binding was collapsed into `snake_case__`, so the later
    references (idalabel, swiftformer_name, original_ckpt, checkpoint,
    rename_keys, hf_model, processor, inputs, hf_logits, ...) are undefined.
    Reconstruct the intended names before use.
    """
    snake_case__ : Union[str, Any] = SwiftFormerConfig()
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    snake_case__ : Any = 1_0_0_0
    snake_case__ : Any = 'huggingface/label-files'
    snake_case__ : Union[str, Any] = 'imagenet-1k-id2label.json'
    snake_case__ : List[Any] = json.load(open(hf_hub_download(A , A , repo_type='dataset' ) , 'r' ) )
    snake_case__ : Union[str, Any] = {int(A ): v for k, v in idalabel.items()}
    snake_case__ : List[Any] = idalabel
    snake_case__ : Union[str, Any] = {v: k for k, v in idalabel.items()}
    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        snake_case__ : Optional[Any] = [3, 3, 6, 4]
        snake_case__ : Union[str, Any] = [4_8, 5_6, 1_1_2, 2_2_0]
    elif swiftformer_name == "swiftformer_s":
        snake_case__ : Optional[int] = [3, 3, 9, 6]
        snake_case__ : List[Any] = [4_8, 6_4, 1_6_8, 2_2_4]
    elif swiftformer_name == "swiftformer_l1":
        snake_case__ : Any = [4, 3, 1_0, 5]
        snake_case__ : Dict = [4_8, 9_6, 1_9_2, 3_8_4]
    elif swiftformer_name == "swiftformer_l3":
        snake_case__ : Tuple = [4, 4, 1_2, 6]
        snake_case__ : List[str] = [6_4, 1_2_8, 3_2_0, 5_1_2]
    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith('https' ):
            snake_case__ : Tuple = torch.hub.load_state_dict_from_url(A , map_location='cpu' , check_hash=A )
        else:
            snake_case__ : Optional[int] = torch.load(A , map_location='cpu' )
        snake_case__ : Optional[Any] = checkpoint
        snake_case__ : int = create_rename_keys(A )
        for rename_key_src, rename_key_dest in rename_keys:
            rename_key(A , A , A )
    # load HuggingFace model
    snake_case__ : Union[str, Any] = SwiftFormerForImageClassification(A ).eval()
    hf_model.load_state_dict(A )
    # prepare test inputs
    snake_case__ : Dict = prepare_img()
    snake_case__ : Union[str, Any] = ViTImageProcessor.from_pretrained('preprocessor_config' )
    snake_case__ : List[str] = processor(images=A , return_tensors='pt' )
    # compare outputs from both models
    snake_case__ : str = get_expected_output(A )
    snake_case__ : Union[str, Any] = hf_model(inputs['pixel_values'] ).logits
    assert hf_logits.shape == torch.Size([1, 1_0_0_0] )
    assert torch.allclose(hf_logits[0, 0:5] , A , atol=1e-3 )
    Path(A ).mkdir(exist_ok=A )
    print(F'''Saving model {swiftformer_name} to {pytorch_dump_folder_path}''' )
    hf_model.save_pretrained(A )
if __name__ == "__main__":
    # CLI entry point.  The previous version bound the parser and the parsed
    # args to a placeholder name, so `parser` and `args` were NameErrors.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swiftformer_name",
        default="swiftformer_xs",
        choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
        type=str,
        help="Name of the SwiftFormer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="./converted_outputs/",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")
    args = parser.parse_args()
    convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 277 |
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
a_ :Tuple = logging.get_logger(__name__)
# fairseq-to-HF key mapping for the wav2vec2 encoder; '*' is a layer-index
# wildcard filled in by the loader below.
# NOTE(review): both constants are bound to the name `a_` (the second shadows
# the first); the loaders below reference them as MAPPING and, presumably,
# TOP_LEVEL_KEYS — the intended names were lost; confirm and restore.
a_ :List[Any] = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
# Keys that live at the top level of the HF model (not under wav2vec2.*).
a_ :Optional[int] = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def lowercase_ (hf_pointer , key , value , full_name , weight_type ):
    """Walk *key* (dot path) from *hf_pointer* and copy *value* into the
    selected parameter (weight / weight_g / weight_v / bias, or the module's
    own data), checking shapes first.

    Fixes vs. the previous version: the five parameters shared one name
    (a SyntaxError) and every assignment target was lost to a placeholder,
    so no value was ever written.
    NOTE(review): logs via a module-level ``logger`` — confirm that name is
    actually bound in this module.
    """
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
        F''' {value.shape} for {full_name}'''
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''')
def lowercase_ (A : str , A : Any ):
    """Copy weights from a fairseq model's state dict into the HF wav2vec2
    model, dispatching conv-feature-extractor and adapter weights to their
    dedicated loaders and everything else through the MAPPING table.

    NOTE(review): the two parameters share one name (a SyntaxError) —
    presumably (fairseq_model, hf_model); every local binding was collapsed
    into `snake_case__`, so unused_weights, fairseq_dict, feature_extractor,
    adapter, is_used, layer_index, mapped_key and weight_type are all
    undefined, and the helper call arguments were lost to `A` placeholders.
    Reconstruct the intended names before use.
    """
    snake_case__ : Union[str, Any] = []
    snake_case__ : Union[str, Any] = fairseq_model.state_dict()
    snake_case__ : Union[str, Any] = hf_model.feature_extractor
    snake_case__ : Any = hf_model.adapter
    for name, value in fairseq_dict.items():
        snake_case__ : Any = False
        if "conv_layers" in name:
            load_conv_layer(
                A , A , A , A , hf_model.config.feat_extract_norm == 'group' , )
            snake_case__ : List[Any] = True
        elif any(x in name for x in ['adaptor', 'w2v_encoder.proj.', 'w2v_proj_ln.'] ):
            load_adapter(A , A , A , A )
            snake_case__ : Optional[Any] = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
                    snake_case__ : Tuple = True
                    if "*" in mapped_key:
                        snake_case__ : List[Any] = name.split(A )[0].split('.' )[-2]
                        snake_case__ : Optional[int] = mapped_key.replace('*' , A )
                    if "weight_g" in name:
                        snake_case__ : Optional[int] = 'weight_g'
                    elif "weight_v" in name:
                        snake_case__ : Optional[Any] = 'weight_v'
                    elif "bias" in name:
                        snake_case__ : Union[str, Any] = 'bias'
                    elif "weight" in name:
                        snake_case__ : Optional[int] = 'weight'
                    else:
                        snake_case__ : Tuple = None
                    set_recursively(A , A , A , A , A )
                continue
            if not is_used:
                unused_weights.append(A )
    logger.warning(F'''Unused weights: {unused_weights}''' )
def lowercase_ (A : Union[str, Any] , A : Any , A : str , A : str , A : int ):
    """Copy one conv-feature-extractor weight/bias (or its layer norm) from
    the fairseq checkpoint into the HF feature extractor.

    NOTE(review): the five parameters share one name (a SyntaxError) —
    presumably (full_name, value, feature_extractor, unused_weights,
    use_group_norm); all local bindings (name, items, layer_id, type_id) were
    collapsed into `snake_case__` and the final copies into the conv/norm
    parameters were lost.  Reconstruct the intended names before use.
    """
    snake_case__ : str = full_name.split('conv_layers.' )[-1]
    snake_case__ : Optional[int] = name.split('.' )
    snake_case__ : Tuple = int(items[0] )
    snake_case__ : Any = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            snake_case__ : Union[str, Any] = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            snake_case__ : Union[str, Any] = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            snake_case__ : Optional[int] = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            snake_case__ : Optional[Any] = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(A )
def lowercase_ (A : Optional[Any] , A : Any , A : Tuple , A : Any ):
    """Copy one adapter weight/bias (projection, projection layer norm, or a
    numbered adapter conv layer) from the fairseq checkpoint into the HF
    adapter module.

    NOTE(review): the four parameters share one name (a SyntaxError) —
    presumably (full_name, value, adapter, unused_weights); local bindings
    (name, items, layer_id) were collapsed into `snake_case__` and every
    final copy into the adapter parameters was lost.  The
    ``isinstance(A , A )`` guard has also lost its intended arguments
    (likely ``isinstance(layer_id, int)``).  Reconstruct before use.
    """
    snake_case__ : List[str] = full_name.split('adaptor.' )[-1]
    snake_case__ : Tuple = name.split('.' )
    if items[1].isdigit():
        snake_case__ : Optional[int] = int(items[1] )
    else:
        snake_case__ : Any = None
    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'''
                snake_case__ : List[Any] = value
                logger.info(F'''Adapter proj layer norm bias was initialized from {full_name}.''' )
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'''
                snake_case__ : int = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), F'''{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'''
                snake_case__ : str = value
                logger.info(F'''Adapter proj layer bias was initialized from {full_name}.''' )
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), F'''{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'''
                snake_case__ : Dict = value
                logger.info(F'''Adapter proj layer weight was initialized from {full_name}.''' )
    elif isinstance(A , A ):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'''
            snake_case__ : List[str] = value
            logger.info(F'''Adapter layer {layer_id} bias was initialized from {full_name}.''' )
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'''
            snake_case__ : List[str] = value
            logger.info(F'''Adapter layer {layer_id} bias was initialized from {full_name}.''' )
    else:
        unused_weights.append(A )
def lowercase_ (A : int ):
    """Build an untied ``nn.Linear`` whose weight is shared with embedding *A*.

    Args:
        A: an ``nn.Embedding`` (or any module with a 2-D ``weight``).

    Returns:
        ``nn.Linear(vocab_size, emb_size, bias=False)`` whose ``weight.data``
        is the embedding's weight tensor (shared storage, not a copy).
    """
    vocab_size, emb_size = A.weight.shape
    # bias=False: an LM head projected from an embedding matrix has no bias term.
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = A.weight.data
    return lin_layer
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    config_yaml_path,
    encoder_config_path,
    decoder_config_path,
    add_adapter,
    adapter_kernel_size,
    adapter_stride,
    decoder_start_token_id,
    encoder_output_dim,
):
    """Convert a fairseq wav2vec2 + mBART seq2seq checkpoint to a HF SpeechEncoderDecoderModel.

    Loads the fairseq checkpoint, copies the encoder weights into a Wav2Vec2
    model (with adapter layers) and the decoder weights into an MBart causal
    LM, then saves model, tokenizer and feature extractor to
    ``pytorch_dump_folder_path``.

    Note: the def is named to match the call in the ``__main__`` block below.
    """
    # Encoder config: adapter settings come from the CLI.
    # NOTE(review): use_auth_token=True assumed (obfuscated in original) — confirm.
    encoder_config = WavaVecaConfig.from_pretrained(
        encoder_config_path,
        add_adapter=add_adapter,
        adapter_stride=adapter_stride,
        adapter_kernel_size=adapter_kernel_size,
        use_auth_token=True,
        output_hidden_size=encoder_output_dim,
    )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)
    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path],
        arg_overrides={
            'config_yaml': config_yaml_path,
            'data': '/'.join(dict_path.split('/')[:-1]),
            'w2v_path': checkpoint_path,
            'load_pretrained_decoder_from': None,
        },
    )
    model = model[0].eval()
    # load feature extractor
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    recursively_load_weights_wavaveca(model.encoder, hf_encoder)
    # load decoder weights (strict=False: embedding/head keys differ between the two layouts)
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''')
    logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''')
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    # Encoder and decoder have separate vocabularies — never tie embeddings.
    hf_wavavec.config.tie_word_embeddings = False
    tokenizer = MBartaaTokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    # Rebuild the config with tokenizer-derived special token ids.
    config = hf_wavavec.config.to_dict()
    config['pad_token_id'] = tokenizer.pad_token_id
    config['bos_token_id'] = tokenizer.bos_token_id
    config['eos_token_id'] = tokenizer.eos_token_id
    config['tokenizer_class'] = 'mbart50'
    config['feature_extractor_type'] = 'wav2vec2'
    # NOTE(review): mBART-50 convention starts decoding at EOS; the
    # `decoder_start_token_id` CLI value feeds `forced_bos_token_id` — confirm.
    config['decoder_start_token_id'] = tokenizer.eos_token_id
    config['forced_bos_token_id'] = 2_5_0_0_0_4
    config['forced_eos_token_id'] = tokenizer.eos_token_id
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI driver for the wav2vec2 + mBART conversion above.
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
    parser.add_argument(
        "--encoder_config_path",
        default="facebook/wav2vec2-xls-r-1b",
        type=str,
        help="Path to hf encoder wav2vec2 checkpoint config",
    )
    parser.add_argument(
        "--decoder_config_path",
        default="facebook/mbart-large-50-one-to-many-mmt",
        type=str,
        help="Path to hf decoder checkpoint config",
    )
    # NOTE(review): argparse `type=bool` treats ANY non-empty string (even
    # "False") as True — kept as-is to preserve the existing CLI contract.
    parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers")
    parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
    parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
    parser.add_argument("--encoder_output_dim", default=1_024, type=int, help="encoder output dim")
    parser.add_argument("--start_token_id", default=250_004, type=int, help="`decoder_start_token_id` of model config")
    args = parser.parse_args()
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        args.config_yaml_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        add_adapter=args.add_adapter,
        adapter_kernel_size=args.adapter_kernel_size,
        adapter_stride=args.adapter_stride,
        decoder_start_token_id=args.start_token_id,
        encoder_output_dim=args.encoder_output_dim,
    )
| 277 | 1 |
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def shave_segments(path, n_shave_prefix_segments=1):
    """Drop ``n_shave_prefix_segments`` dot-separated segments from *path*.

    A non-negative count removes segments from the front; a negative count
    removes ``|n|`` segments from the end.  Named to match its call sites
    (the renew_* helpers and convert_ldm_checkpoint).
    """
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """Map old-style resnet checkpoint key names to diffusers names.

    Returns a list of ``{"old": ..., "new": ...}`` dicts, one per input key,
    with the common resnet sub-layer renames applied and an optional prefix
    shave.  Named to match its call sites in convert_ldm_checkpoint.
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")
        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")
        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """Map old-style attention checkpoint key names to diffusers names.

    Same output shape as :func:`renew_resnet_paths`: a list of
    ``{"old": ..., "new": ...}`` dicts with norm/proj renames applied.
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")
        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def assign_to_checkpoint(
    paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None
):
    """Copy tensors from ``old_checkpoint`` into ``checkpoint`` under renamed keys.

    Args:
        paths: list of ``{"old": ..., "new": ...}`` dicts from the renew_* helpers.
        checkpoint: destination state dict (mutated in place).
        old_checkpoint: source state dict.
        attention_paths_to_split: mapping of fused-qkv source keys to
            ``{"query": ..., "key": ..., "value": ...}`` destination keys; those
            tensors are split into three before assignment.
        additional_replacements: extra ``{"old": ..., "new": ...}`` substring
            renames applied to every destination key.
        config: model config dict; only ``num_head_channels`` is read here.
    """
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."
    # Split fused qkv attention tensors into separate query/key/value tensors.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3
            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)
            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3
            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)
            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)
    for path in paths:
        new_path = path["new"]
        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue
        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")
        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])
        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]
def convert_ldm_checkpoint(checkpoint, config):
    """Convert an original-layout LDM UNet state dict to the diffusers layout.

    Args:
        checkpoint: source state dict (original ``input_blocks``/``middle_block``/
            ``output_blocks`` key naming).
        config: model config dict; ``num_res_blocks`` and (indirectly, via
            :func:`assign_to_checkpoint`) ``num_head_channels`` are read.

    Returns:
        A new state dict with diffusers ``down_blocks``/``mid_block``/``up_blocks``
        key naming.  Named to match the ``__main__`` call below.
    """
    new_checkpoint = {}
    # Time embedding and stem/head convolutions map 1:1.
    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]
    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]
    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if F'input_blocks.{layer_id}' in key]
        for layer_id in range(num_input_blocks)
    }
    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if F'middle_block.{layer_id}' in key]
        for layer_id in range(num_middle_blocks)
    }
    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if F'output_blocks.{layer_id}' in key]
        for layer_id in range(num_output_blocks)
    }

    # Down path: block 0 is the stem conv handled above, hence range starts at 1.
    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)
        resnets = [key for key in input_blocks[i] if F'input_blocks.{i}.0' in key]
        attentions = [key for key in input_blocks[i] if F'input_blocks.{i}.1' in key]
        if F'input_blocks.{i}.0.op.weight' in checkpoint:
            # This block is a downsampler, not a resnet.
            new_checkpoint[F'down_blocks.{block_id}.downsamplers.0.conv.weight'] = checkpoint[
                F'input_blocks.{i}.0.op.weight'
            ]
            new_checkpoint[F'down_blocks.{block_id}.downsamplers.0.conv.bias'] = checkpoint[
                F'input_blocks.{i}.0.op.bias'
            ]
            continue
        paths = renew_resnet_paths(resnets)
        meta_path = {"old": F'input_blocks.{i}.0', "new": F'down_blocks.{block_id}.resnets.{layer_in_block_id}'}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config
        )
        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": F'input_blocks.{i}.1',
                "new": F'down_blocks.{block_id}.attentions.{layer_in_block_id}',
            }
            to_split = {
                F'input_blocks.{i}.1.qkv.bias': {
                    "key": F'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
                    "query": F'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
                    "value": F'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
                },
                F'input_blocks.{i}.1.qkv.weight': {
                    "key": F'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
                    "query": F'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
                    "value": F'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
                },
            }
            assign_to_checkpoint(
                paths,
                new_checkpoint,
                checkpoint,
                additional_replacements=[meta_path],
                attention_paths_to_split=to_split,
                config=config,
            )

    # Middle block: resnet, attention, resnet.
    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]
    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)
    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)
    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config
    )

    # Up path.
    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}
        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]
        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if F'output_blocks.{i}.0' in key]
            attentions = [key for key in output_blocks[i] if F'output_blocks.{i}.1' in key]
            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)
            meta_path = {"old": F'output_blocks.{i}.0', "new": F'up_blocks.{block_id}.resnets.{layer_in_block_id}'}
            assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config)
            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[F'up_blocks.{block_id}.upsamplers.0.conv.weight'] = checkpoint[
                    F'output_blocks.{i}.{index}.conv.weight'
                ]
                new_checkpoint[F'up_blocks.{block_id}.upsamplers.0.conv.bias'] = checkpoint[
                    F'output_blocks.{i}.{index}.conv.bias'
                ]
                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []
            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": F'output_blocks.{i}.1',
                    "new": F'up_blocks.{block_id}.attentions.{layer_in_block_id}',
                }
                to_split = {
                    F'output_blocks.{i}.1.qkv.bias': {
                        "key": F'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
                        "query": F'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
                        "value": F'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
                    },
                    F'output_blocks.{i}.1.qkv.weight': {
                        "key": F'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
                        "query": F'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
                        "value": F'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
                    },
                }
                assign_to_checkpoint(
                    paths,
                    new_checkpoint,
                    checkpoint,
                    additional_replacements=[meta_path],
                    attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None,
                    config=config,
                )
        else:
            # A lone resnet (no attention/upsampler): rename key-by-key.
            resnet_0_paths = renew_resnet_paths(output_blocks[i], n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])
                new_checkpoint[new_path] = checkpoint[old_path]
    return new_checkpoint
if __name__ == "__main__":
    # CLI driver: load an original LDM checkpoint + config, convert the UNet
    # weights, and save either a full LDMPipeline or just the UNet.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()
    checkpoint = torch.load(args.checkpoint_path)
    with open(args.config_file) as f:
        config = json.loads(f.read())
    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)
    # "ldm" is a checkpoint-format marker, not a UNet constructor kwarg.
    if "ldm" in config:
        del config["ldm"]
    model = UNetaDModel(**config)
    model.load_state_dict(converted_checkpoint)
    try:
        # If scheduler + VQ-VAE live next to the checkpoint, save a full pipeline.
        scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
        vqvae = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))
        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        # Best-effort fallback: save only the converted UNet.
        model.save_pretrained(args.dump_path)
| 352 |
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class __lowerCamelCase ( snake_case_ ):
    # NOTE(review): base class `snake_case_` is not defined in this view —
    # given the `FilterType` annotations below, presumably typing.Protocol; confirm.
    """Interface for audio filters used by the response-plotting helpers.

    The plotting functions only require a per-sample process method that
    returns a float; this stub returns silence.
    """
    def A__ ( self , UpperCAmelCase ) -> float:
        """Process a single input sample; the stub implementation returns 0.0."""
        return 0.0
def get_bounds(fft_results, samplerate):
    """Return (lowest, highest) display bounds for an FFT magnitude plot.

    Only bins 1 .. samplerate//2 - 2 (below Nyquist, excluding DC) are
    considered; the bounds are clamped to at most -20 / at least +20 dB so a
    flat response still gets a sensible y-range.  Named to match the call in
    the frequency-response plotter.
    """
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest
def show_frequency_response(filter_type, samplerate):
    """Plot the magnitude frequency response (in dB) of *filter_type*.

    Feeds a unit impulse through the filter, zero-pads to ``samplerate``
    samples, and plots ``20*log10(|FFT|)`` on a log frequency axis.
    """
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")
    plt.plot(fft_db)
    plt.show()
def show_phase_response(filter_type, samplerate):
    """Plot the phase response (in radians) of *filter_type*.

    Feeds a unit impulse through the filter, zero-pads to ``samplerate``
    samples, and plots the unwrapped FFT phase on a log frequency axis.
    """
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phases = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(phases, -2 * pi))
    plt.show()
| 297 | 0 |
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
    """Unit tests for the generation stopping-criteria classes.

    Methods are renamed so that (a) the ``self._get_tensors`` helper the
    bodies already call actually exists and (b) unittest's ``test_*``
    discovery picks the cases up (the original's identically-named methods
    shadowed one another and never ran).
    """

    def _get_tensors(self, length):
        # (input_ids, scores) of shape (batch, length); rows of scores sum to 1.
        batch_size = 3
        vocab_size = 250
        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))
        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))
        # Backdated start timestamp forces the deadline to have passed.
        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)
        # A mismatching max_length should only warn, not raise.
        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)
        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
| 226 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__A =logging.get_logger(__name__)
def get_deta_config(model_name):
    """Build a DetaConfig (Swin-large backbone) for the given checkpoint name.

    ``model_name`` selects the label set: names containing "o365" get the
    Objects365 labels, everything else gets COCO detection labels (downloaded
    from the ``huggingface/label-files`` dataset repo).
    """
    backbone_config = SwinConfig(
        embed_dim=1_92,
        depths=(2, 2, 18, 2),
        num_heads=(6, 12, 24, 48),
        window_size=12,
        out_features=["stage2", "stage3", "stage4"],
    )
    config = DetaConfig(
        backbone_config=backbone_config,
        num_queries=9_00,
        encoder_ffn_dim=20_48,
        decoder_ffn_dim=20_48,
        num_feature_levels=5,
        assign_first_stage=True,
        with_box_refine=True,
        two_stage=True,
    )
    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 3_66
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"
    config.num_labels = num_labels
    # hf_hub_url takes (repo_id, filename) — not the model name twice.
    idalabel = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    return config
def create_rename_keys(config):
    """Return a list of (old_key, new_key) renames mapping the original DETA
    checkpoint layout to the HF transformers layout.

    ``config`` must expose ``backbone_config.depths``, ``encoder_layers`` and
    ``decoder_layers`` (the body already read these; the parameter is named to
    match).
    """
    rename_keys = []
    # stem
    # fmt: off
    rename_keys.append(('''backbone.0.body.patch_embed.proj.weight''', '''model.backbone.model.embeddings.patch_embeddings.projection.weight''') )
    rename_keys.append(('''backbone.0.body.patch_embed.proj.bias''', '''model.backbone.model.embeddings.patch_embeddings.projection.bias''') )
    rename_keys.append(('''backbone.0.body.patch_embed.norm.weight''', '''model.backbone.model.embeddings.norm.weight''') )
    rename_keys.append(('''backbone.0.body.patch_embed.norm.bias''', '''model.backbone.model.embeddings.norm.bias''') )
    # stages
    for i in range(len(config.backbone_config.depths ) ):
        for j in range(config.backbone_config.depths[i] ):
            rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
            rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
            rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
            rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
            rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
            rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
            rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
            rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
            rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
            rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
            rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
            rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
        if i < 3:
            rename_keys.append((f'backbone.0.body.layers.{i}.downsample.reduction.weight', f'model.backbone.model.encoder.layers.{i}.downsample.reduction.weight') )
            rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.weight', f'model.backbone.model.encoder.layers.{i}.downsample.norm.weight') )
            rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.bias', f'model.backbone.model.encoder.layers.{i}.downsample.norm.bias') )
    rename_keys.append(('''backbone.0.body.norm1.weight''', '''model.backbone.model.hidden_states_norms.stage2.weight''') )
    rename_keys.append(('''backbone.0.body.norm1.bias''', '''model.backbone.model.hidden_states_norms.stage2.bias''') )
    rename_keys.append(('''backbone.0.body.norm2.weight''', '''model.backbone.model.hidden_states_norms.stage3.weight''') )
    rename_keys.append(('''backbone.0.body.norm2.bias''', '''model.backbone.model.hidden_states_norms.stage3.bias''') )
    rename_keys.append(('''backbone.0.body.norm3.weight''', '''model.backbone.model.hidden_states_norms.stage4.weight''') )
    rename_keys.append(('''backbone.0.body.norm3.bias''', '''model.backbone.model.hidden_states_norms.stage4.bias''') )
    # transformer encoder
    for i in range(config.encoder_layers ):
        rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight', f'model.encoder.layers.{i}.self_attn.sampling_offsets.weight') )
        rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias', f'model.encoder.layers.{i}.self_attn.sampling_offsets.bias') )
        rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.weight', f'model.encoder.layers.{i}.self_attn.attention_weights.weight') )
        rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.bias', f'model.encoder.layers.{i}.self_attn.attention_weights.bias') )
        rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.weight', f'model.encoder.layers.{i}.self_attn.value_proj.weight') )
        rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.bias', f'model.encoder.layers.{i}.self_attn.value_proj.bias') )
        rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.weight', f'model.encoder.layers.{i}.self_attn.output_proj.weight') )
        rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.bias', f'model.encoder.layers.{i}.self_attn.output_proj.bias') )
        rename_keys.append((f'transformer.encoder.layers.{i}.norm1.weight', f'model.encoder.layers.{i}.self_attn_layer_norm.weight') )
        rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'model.encoder.layers.{i}.self_attn_layer_norm.bias') )
        rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'model.encoder.layers.{i}.fc1.weight') )
        rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'model.encoder.layers.{i}.fc1.bias') )
        rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'model.encoder.layers.{i}.fc2.weight') )
        rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'model.encoder.layers.{i}.fc2.bias') )
        rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'model.encoder.layers.{i}.final_layer_norm.weight') )
        rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'model.encoder.layers.{i}.final_layer_norm.bias') )
    # transformer decoder
    for i in range(config.decoder_layers ):
        rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight') )
        rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias') )
        rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.weight', f'model.decoder.layers.{i}.encoder_attn.attention_weights.weight') )
        rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.bias', f'model.decoder.layers.{i}.encoder_attn.attention_weights.bias') )
        rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.weight', f'model.decoder.layers.{i}.encoder_attn.value_proj.weight') )
        rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.bias', f'model.decoder.layers.{i}.encoder_attn.value_proj.bias') )
        rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.weight', f'model.decoder.layers.{i}.encoder_attn.output_proj.weight') )
        rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.bias', f'model.decoder.layers.{i}.encoder_attn.output_proj.bias') )
        rename_keys.append((f'transformer.decoder.layers.{i}.norm1.weight', f'model.decoder.layers.{i}.encoder_attn_layer_norm.weight') )
        rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'model.decoder.layers.{i}.encoder_attn_layer_norm.bias') )
        rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'model.decoder.layers.{i}.self_attn.out_proj.weight') )
        rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'model.decoder.layers.{i}.self_attn.out_proj.bias') )
        rename_keys.append((f'transformer.decoder.layers.{i}.norm2.weight', f'model.decoder.layers.{i}.self_attn_layer_norm.weight') )
        rename_keys.append((f'transformer.decoder.layers.{i}.norm2.bias', f'model.decoder.layers.{i}.self_attn_layer_norm.bias') )
        rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'model.decoder.layers.{i}.fc1.weight') )
        rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'model.decoder.layers.{i}.fc1.bias') )
        rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'model.decoder.layers.{i}.fc2.weight') )
        rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'model.decoder.layers.{i}.fc2.bias') )
        rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'model.decoder.layers.{i}.final_layer_norm.weight') )
        rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'model.decoder.layers.{i}.final_layer_norm.bias') )
    # fmt: on
    return rename_keys
def rename_key(dct, old, new):
    """Move the value stored under key *old* to key *new* in *dct* (in place)."""
    val = dct.pop(old)
    dct[new] = val
def a(state_dict, backbone_config):
    """Split each fused Swin ``attn.qkv`` matrix/bias in *state_dict* into
    separate query/key/value entries under the HF naming scheme (in place).

    The original obfuscated body computed every slice and then discarded it;
    the ``state_dict[...] = ...`` targets are restored here.
    """
    # Swin doubles the feature dimension at every stage, starting from embed_dim.
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight')
            in_proj_bias = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias')
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'] = in_proj_weight[:dim, :]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'] = in_proj_bias[:dim]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'] = in_proj_weight[dim : dim * 2, :]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'] = in_proj_bias[dim : dim * 2]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'] = in_proj_weight[-dim:, :]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'] = in_proj_bias[-dim:]
            # fmt: on
def a(state_dict, config):
    """Split each decoder self-attention fused ``in_proj`` weight/bias in
    *state_dict* into separate q/k/v projection entries (in place).

    The original obfuscated body computed every slice and then discarded it;
    the ``state_dict[...] = ...`` targets are restored here.
    """
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_weight')
        in_proj_bias = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'model.decoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:hidden_size, :]
        state_dict[f'model.decoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:hidden_size]
        state_dict[f'model.decoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f'model.decoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f'model.decoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-hidden_size:, :]
        state_dict[f'model.decoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-hidden_size:]
def a():
    """Download and return the standard COCO validation image used for
    conversion sanity checks.

    The original obfuscated body assigned the URL to one dummy name and read
    a different (undefined) one; the local is restored here.
    """
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    # `stream=True` lets PIL read directly from the raw response body.
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def a(model_name, pytorch_dump_folder_path, push_to_hub):
    """Convert an original DETA checkpoint to the HF format, sanity-check its
    outputs on a test image, and optionally save / push the result.

    The original obfuscated body declared three identically-named parameters
    (a SyntaxError) and discarded every rename in the "fix some prefixes"
    loop; both are repaired here.
    """
    config = get_deta_config(model_name)
    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id='nielsr/deta-checkpoints', filename='adet_swin_ft.pth')
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id='jozhang97/deta-swin-l-o365', filename='deta_swin_pt_o365.pth')
    else:
        raise ValueError(f'Model name {model_name} not supported')
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
    # print original state dict (debugging aid)
    for name, param in state_dict.items():
        print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)
    # fix some prefixes: move the remaining original-prefixed keys under "model."
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val
    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model.to(device)
    # load image processor
    processor = DetaImageProcessor(format='coco_detection')
    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors='pt')
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values.to(device))
    # verify logits against reference values recorded from the original model
    print('Logits:', outputs.logits[0, :3, :3])
    print('Boxes:', outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]])
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]])
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print('Everything ok!')
    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f'Saving PyTorch model and processor to {pytorch_dump_folder_path}...')
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    # Push to hub
    if push_to_hub:
        print('Pushing model and processor to hub...')
        model.push_to_hub(f'jozhang97/{model_name}')
        processor.push_to_hub(f'jozhang97/{model_name}')
if __name__ == "__main__":
    # CLI wrapper for the conversion function above. The obfuscated original
    # assigned the parser/args to `__A` while reading `parser`/`args`
    # (NameError); the names are made consistent here.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        type=str,
        default="deta-swin-large",
        choices=["deta-swin-large", "deta-swin-large-o365"],
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        help="Path to the folder to output PyTorch model.",
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    # `a` is the @torch.no_grad() conversion entry point defined above
    # (obfuscated name for `convert_deta_checkpoint`).
    a(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 226 | 1 |
'''simple docstring'''
from __future__ import annotations
def UpperCamelCase( UpperCAmelCase_ ):
    """Return the maximum sum of non-adjacent elements of the sequence.

    Classic "house robber" dynamic programming: walk the list once, tracking
    the best total that includes the current element and the best that
    excludes it. Returns 0 for an empty input.
    """
    if not UpperCAmelCase_:
        return 0
    max_including = UpperCAmelCase_[0]
    max_excluding = 0
    for num in UpperCAmelCase_[1:]:
        # Including `num` forces the previous element to be excluded.
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_excluding, max_including)
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 280 |
'''simple docstring'''
def UpperCamelCase(first_str, second_str):
    """Interleave the characters of two strings.

    Characters are taken alternately from *first_str* and *second_str*; once
    the shorter string is exhausted, the remainder of the longer one is
    appended. E.g. ("AB", "XYZ") -> "AXBYZ".

    The obfuscated original declared two identically-named parameters (a
    SyntaxError) and read undefined locals; both are repaired here.
    """
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    # Iterate up to the longer length so trailing characters are kept.
    abs_length = first_str_length if first_str_length > second_str_length else second_str_length
    output_list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)
if __name__ == "__main__":
    # Demo of the interleaving function above; the obfuscated original called
    # the undefined name `alternative_string_arrange` (NameError).
    print(UpperCamelCase("AB", "XYZ"), end=" ")
| 280 | 1 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class _lowerCamelCase( _a ):
    # NOTE(review): base class `_a` is not defined in this file; given the
    # import above it is presumably BertTokenizationTest — confirm.
    """Tokenization tests for DistilBERT, reusing the shared BERT suite."""
    # Slow and fast tokenizer classes under test.
    lowercase_ : Optional[Any] = DistilBertTokenizer
    lowercase_ : Optional[int] = DistilBertTokenizerFast
    lowercase_ : Any = True
    @slow
    def UpperCamelCase ( self) -> Dict:
        """Check that build_inputs_with_special_tokens wraps single sequences
        and pairs with [CLS]/[SEP] correctly.

        NOTE(review): the body looks machine-obfuscated — results are assigned
        to `_lowercase` but later lines read `tokenizer`, `text`, `text_a`;
        de-obfuscation is required for this test to actually run.
        """
        _lowercase : int = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')
        _lowercase : Union[str, Any] = tokenizer.encode('sequence builders', add_special_tokens=lowerCamelCase)
        _lowercase : Tuple = tokenizer.encode('multi-sequence build', add_special_tokens=lowerCamelCase)
        _lowercase : Optional[int] = tokenizer.build_inputs_with_special_tokens(lowerCamelCase)
        _lowercase : List[Any] = tokenizer.build_inputs_with_special_tokens(lowerCamelCase, lowerCamelCase)
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
            tokenizer.sep_token_id
        ]
| 21 | """simple docstring"""
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
_a : int= datasets.utils.logging.get_logger(__name__)
@dataclass
class UpperCamelCase ( datasets.BuilderConfig ):
    # NOTE(review): field names are machine-obfuscated (all `UpperCAmelCase`);
    # judging by types/defaults they were presumably: features, encoding,
    # encoding_errors, field, use_threads, block_size, chunksize,
    # newlines_in_values — confirm against the upstream datasets JSON loader.
    """BuilderConfig for the JSON/JSON-Lines dataset builder below."""
    UpperCAmelCase : Optional[datasets.Features] = None
    UpperCAmelCase : str = "utf-8"
    UpperCAmelCase : Optional[str] = None
    UpperCAmelCase : Optional[str] = None
    UpperCAmelCase : bool = True  # deprecated
    UpperCAmelCase : Optional[int] = None  # deprecated
    UpperCAmelCase : int = 10 << 20  # 10MB
    UpperCAmelCase : Optional[bool] = None
class UpperCamelCase ( datasets.ArrowBasedBuilder ):
    """Arrow-based builder that loads JSON / JSON-Lines files into Arrow tables.

    NOTE(review): local assignment targets in this class are machine-obfuscated
    (`__snake_case`) while later lines read the original names (`block_size`,
    `data_files`, `files`, `splits`, `dataset`, `keys`, `batch`, `batch_idx`,
    `pa_table`); the logic is annotated as written, but the module needs
    de-obfuscation to run.
    """
    # Config class used for this builder.
    UpperCAmelCase : int = JsonConfig
    def _lowercase (self : int) -> List[str]:
        """Warn/raise on deprecated config options and build the DatasetInfo."""
        if self.config.block_size is not None:
            logger.warning('The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead')
            __snake_case : Any = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                'The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.')
        if self.config.newlines_in_values is not None:
            raise ValueError('The JSON loader parameter `newlines_in_values` is no longer supported')
        return datasets.DatasetInfo(features=self.config.features)
    def _lowercase (self : Dict , _A : Any) -> Optional[Any]:
        """Resolve/download data files and emit one SplitGenerator per split."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        __snake_case : Dict = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(_A , (str, list, tuple)):
            # A bare path or list of paths means a single (train) split.
            __snake_case : str = data_files
            if isinstance(_A , _A):
                __snake_case : int = [files]
            __snake_case : Tuple = [dl_manager.iter_files(_A) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files})]
        __snake_case : str = []
        for split_name, files in data_files.items():
            if isinstance(_A , _A):
                __snake_case : Optional[int] = [files]
            __snake_case : int = [dl_manager.iter_files(_A) for file in files]
            splits.append(datasets.SplitGenerator(name=_A , gen_kwargs={'files': files}))
        return splits
    def _lowercase (self : Optional[Any] , _A : pa.Table) -> pa.Table:
        """Cast a parsed table to the configured features, null-filling any
        columns that are missing from the file."""
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                __snake_case : List[Any] = self.config.features.arrow_schema.field(_A).type
                __snake_case : Any = pa_table.append_column(_A , pa.array([None] * len(_A) , type=_A))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            __snake_case : List[str] = table_cast(_A , self.config.features.arrow_schema)
        return pa_table
    def _lowercase (self : Dict , _A : Any) -> Union[str, Any]:
        """Yield (key, Arrow table) pairs from each file: whole-document JSON
        when ``config.field`` is set, otherwise chunked JSON-Lines parsing
        with adaptive pyarrow block sizes and a whole-file JSON fallback."""
        for file_idx, file in enumerate(itertools.chain.from_iterable(_A)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(_A , encoding=self.config.encoding , errors=self.config.encoding_errors) as f:
                    __snake_case : Tuple = json.load(_A)
                # We keep only the field we are interested in
                __snake_case : List[str] = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(_A , (list, tuple)):
                    __snake_case : Optional[int] = set().union(*[row.keys() for row in dataset])
                    __snake_case : List[str] = {col: [row.get(_A) for row in dataset] for col in keys}
                else:
                    __snake_case : Optional[int] = dataset
                __snake_case : Tuple = pa.Table.from_pydict(_A)
                yield file_idx, self._cast_table(_A)
            # If the file has one json object per line
            else:
                with open(_A , 'rb') as f:
                    __snake_case : int = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    __snake_case : Tuple = max(self.config.chunksize // 32 , 16 << 10)
                    __snake_case : str = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else 'strict'
                    )
                    while True:
                        __snake_case : Union[str, Any] = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(_A)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            __snake_case : int = batch.decode(self.config.encoding , errors=_A).encode('utf-8')
                        try:
                            while True:
                                try:
                                    __snake_case : Tuple = paj.read_json(
                                        io.BytesIO(_A) , read_options=paj.ReadOptions(block_size=_A))
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(_A , pa.ArrowInvalid)
                                        and "straddling" not in str(_A)
                                        or block_size > len(_A)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(_A)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.")
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            # Chunked parsing failed: fall back to loading the
                            # whole file as a single JSON document.
                            try:
                                with open(
                                    _A , encoding=self.config.encoding , errors=self.config.encoding_errors) as f:
                                    __snake_case : List[Any] = json.load(_A)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(_A)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(_A , _A): # list is the only sequence type supported in JSON
                                try:
                                    __snake_case : List[str] = set().union(*[row.keys() for row in dataset])
                                    __snake_case : List[str] = {col: [row.get(_A) for row in dataset] for col in keys}
                                    __snake_case : List[str] = pa.Table.from_pydict(_A)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(_A)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(_A)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(_A)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. ") from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(_A)
                        batch_idx += 1
| 172 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__snake_case : Dict = {
'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : List[Any] = [
'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'GraphormerForGraphClassification',
'GraphormerModel',
'GraphormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
__snake_case : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 18 | '''simple docstring'''
from __future__ import annotations
def _UpperCAmelCase ( _UpperCamelCase : int | str ) -> bool:
A_ = str(_UpperCamelCase )
return n == n[::-1]
def _UpperCAmelCase ( _UpperCamelCase : int = 1_00_00_00 ) -> Any:
A_ = 0
for i in range(1, _UpperCamelCase ):
if is_palindrome(_UpperCamelCase ) and is_palindrome(bin(_UpperCamelCase ).split('''b''' )[1] ):
total += i
return total
if __name__ == "__main__":
    # The obfuscated original called the undefined name `solution`;
    # `_UpperCAmelCase` (defined above) is the double-base palindrome sum.
    print(_UpperCAmelCase(int(str(input().strip()))))
| 18 | 1 |
import numpy as np
class UpperCamelCase__ :
    """A grid cell for A* search.

    Attributes: `position` (x, y) tuple, `parent` (link used for path
    reconstruction) and the A* costs `g` (path cost so far), `h` (heuristic)
    and `f` (= g + h). The obfuscated original assigned these values to dummy
    locals instead of `self`, so `__eq__` and the search crashed; the
    attribute assignments are restored here.
    """

    def __init__(self) -> None:
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell) -> bool:
        # Two cells denote the same node iff they occupy the same position.
        return self.position == cell.position

    def SCREAMING_SNAKE_CASE__(self) -> None:
        """Print this cell's position (debug helper; obfuscated method name)."""
        print(self.position)
class UpperCamelCase__ :
    """A rectangular grid world backed by a numpy array.

    The obfuscated original assigned the grid and bounds to dummy locals
    instead of `self`, and referenced the undefined name `Cell`; the
    attribute assignments are restored and neighbour cells are created from
    the argument's own class so no global name is required.
    """

    def __init__(self, world_size=(5, 5)) -> None:
        # The grid itself plus its bounds, used for neighbour range checks.
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def SCREAMING_SNAKE_CASE__(self) -> None:
        """Print the underlying grid (debug helper; obfuscated method name)."""
        print(self.w)

    def SCREAMING_SNAKE_CASE__(self, cell):
        """Return the in-bounds 8-connected neighbour cells of *cell*.

        NOTE(review): this def reuses the previous method's obfuscated name,
        mirroring the original file; the later definition wins at class
        creation time.
        """
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x, current_y = cell.position
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                # Build a neighbour of the same class as the input cell.
                neighbour = type(cell)()
                neighbour.position = (x, y)
                neighbour.parent = cell
                neighbours.append(neighbour)
        return neighbours
def __lowercase(world, start, goal):
    """A* search from *start* to *goal* on *world*; return the path as a list
    of positions from start to goal.

    Uses unit step cost and squared Euclidean distance as the heuristic.
    The obfuscated original assigned g/h/f to dummy locals and read undefined
    coordinate names; the score updates and the closed/open membership checks
    (which were no-op loops) are restored here.

    NOTE(review): `world.get_neigbours` matches the original call, but the
    grid class above exposes that method under an obfuscated name — confirm
    after de-obfuscating the class.
    """
    _open = []
    _closed = []
    _open.append(start)
    while _open:
        # Expand the open cell with the smallest f-score.
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neigbours(current):
            # Skip nodes that were already expanded.
            if n in _closed:
                continue
            n.g = current.g + 1
            xa, ya = n.position
            xb, yb = goal.position
            n.h = (yb - ya) ** 2 + (xb - xa) ** 2
            n.f = n.h + n.g
            # Skip if an equal node with a better score is already queued.
            if any(c == n and c.f < n.f for c in _open):
                continue
            _open.append(n)
    # Walk the parent links back from the goal to rebuild the path.
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]
if __name__ == "__main__":
    # NOTE(review): this demo is broken by machine obfuscation — every
    # assignment targets `SCREAMING_SNAKE_CASE_` while the code reads `start`,
    # `goal`, `world` and `s`, and the names `Gridworld`, `Cell` and `astar`
    # do not exist in this file (the definitions above were renamed).
    # Restore the original identifiers before running.
    SCREAMING_SNAKE_CASE_ = Gridworld()
    # Start position and goal
    SCREAMING_SNAKE_CASE_ = Cell()
    SCREAMING_SNAKE_CASE_ = (0, 0)
    SCREAMING_SNAKE_CASE_ = Cell()
    SCREAMING_SNAKE_CASE_ = (4, 4)
    print(F'''path from {start.position} to {goal.position}''')
    SCREAMING_SNAKE_CASE_ = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        SCREAMING_SNAKE_CASE_ = 1
    print(world.w)
| 296 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Base model mapping
("""albert""", """FlaxAlbertModel"""),
("""bart""", """FlaxBartModel"""),
("""beit""", """FlaxBeitModel"""),
("""bert""", """FlaxBertModel"""),
("""big_bird""", """FlaxBigBirdModel"""),
("""blenderbot""", """FlaxBlenderbotModel"""),
("""blenderbot-small""", """FlaxBlenderbotSmallModel"""),
("""clip""", """FlaxCLIPModel"""),
("""distilbert""", """FlaxDistilBertModel"""),
("""electra""", """FlaxElectraModel"""),
("""gpt-sw3""", """FlaxGPT2Model"""),
("""gpt2""", """FlaxGPT2Model"""),
("""gpt_neo""", """FlaxGPTNeoModel"""),
("""gptj""", """FlaxGPTJModel"""),
("""longt5""", """FlaxLongT5Model"""),
("""marian""", """FlaxMarianModel"""),
("""mbart""", """FlaxMBartModel"""),
("""mt5""", """FlaxMT5Model"""),
("""opt""", """FlaxOPTModel"""),
("""pegasus""", """FlaxPegasusModel"""),
("""regnet""", """FlaxRegNetModel"""),
("""resnet""", """FlaxResNetModel"""),
("""roberta""", """FlaxRobertaModel"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""),
("""roformer""", """FlaxRoFormerModel"""),
("""t5""", """FlaxT5Model"""),
("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""),
("""vit""", """FlaxViTModel"""),
("""wav2vec2""", """FlaxWav2Vec2Model"""),
("""whisper""", """FlaxWhisperModel"""),
("""xglm""", """FlaxXGLMModel"""),
("""xlm-roberta""", """FlaxXLMRobertaModel"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for pre-training mapping
("""albert""", """FlaxAlbertForPreTraining"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForPreTraining"""),
("""big_bird""", """FlaxBigBirdForPreTraining"""),
("""electra""", """FlaxElectraForPreTraining"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Masked LM mapping
("""albert""", """FlaxAlbertForMaskedLM"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForMaskedLM"""),
("""big_bird""", """FlaxBigBirdForMaskedLM"""),
("""distilbert""", """FlaxDistilBertForMaskedLM"""),
("""electra""", """FlaxElectraForMaskedLM"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("""bart""", """FlaxBartForConditionalGeneration"""),
("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""),
("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""),
("""encoder-decoder""", """FlaxEncoderDecoderModel"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""marian""", """FlaxMarianMTModel"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""pegasus""", """FlaxPegasusForConditionalGeneration"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Image-classsification
("""beit""", """FlaxBeitForImageClassification"""),
("""regnet""", """FlaxRegNetForImageClassification"""),
("""resnet""", """FlaxResNetForImageClassification"""),
("""vit""", """FlaxViTForImageClassification"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Causal LM mapping
("""bart""", """FlaxBartForCausalLM"""),
("""bert""", """FlaxBertForCausalLM"""),
("""big_bird""", """FlaxBigBirdForCausalLM"""),
("""electra""", """FlaxElectraForCausalLM"""),
("""gpt-sw3""", """FlaxGPT2LMHeadModel"""),
("""gpt2""", """FlaxGPT2LMHeadModel"""),
("""gpt_neo""", """FlaxGPTNeoForCausalLM"""),
("""gptj""", """FlaxGPTJForCausalLM"""),
("""opt""", """FlaxOPTForCausalLM"""),
("""roberta""", """FlaxRobertaForCausalLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""),
("""xglm""", """FlaxXGLMForCausalLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Sequence Classification mapping
("""albert""", """FlaxAlbertForSequenceClassification"""),
("""bart""", """FlaxBartForSequenceClassification"""),
("""bert""", """FlaxBertForSequenceClassification"""),
("""big_bird""", """FlaxBigBirdForSequenceClassification"""),
("""distilbert""", """FlaxDistilBertForSequenceClassification"""),
("""electra""", """FlaxElectraForSequenceClassification"""),
("""mbart""", """FlaxMBartForSequenceClassification"""),
("""roberta""", """FlaxRobertaForSequenceClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""),
("""roformer""", """FlaxRoFormerForSequenceClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Question Answering mapping
("""albert""", """FlaxAlbertForQuestionAnswering"""),
("""bart""", """FlaxBartForQuestionAnswering"""),
("""bert""", """FlaxBertForQuestionAnswering"""),
("""big_bird""", """FlaxBigBirdForQuestionAnswering"""),
("""distilbert""", """FlaxDistilBertForQuestionAnswering"""),
("""electra""", """FlaxElectraForQuestionAnswering"""),
("""mbart""", """FlaxMBartForQuestionAnswering"""),
("""roberta""", """FlaxRobertaForQuestionAnswering"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""),
("""roformer""", """FlaxRoFormerForQuestionAnswering"""),
("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Token Classification mapping
("""albert""", """FlaxAlbertForTokenClassification"""),
("""bert""", """FlaxBertForTokenClassification"""),
("""big_bird""", """FlaxBigBirdForTokenClassification"""),
("""distilbert""", """FlaxDistilBertForTokenClassification"""),
("""electra""", """FlaxElectraForTokenClassification"""),
("""roberta""", """FlaxRobertaForTokenClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""),
("""roformer""", """FlaxRoFormerForTokenClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Multiple Choice mapping
("""albert""", """FlaxAlbertForMultipleChoice"""),
("""bert""", """FlaxBertForMultipleChoice"""),
("""big_bird""", """FlaxBigBirdForMultipleChoice"""),
("""distilbert""", """FlaxDistilBertForMultipleChoice"""),
("""electra""", """FlaxElectraForMultipleChoice"""),
("""roberta""", """FlaxRobertaForMultipleChoice"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""),
("""roformer""", """FlaxRoFormerForMultipleChoice"""),
("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
("""bert""", """FlaxBertForNextSentencePrediction"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
("""whisper""", """FlaxWhisperForAudioClassification"""),
]
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class UpperCamelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__snake_case : List[str] = FLAX_MODEL_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModel)
class UpperCamelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__snake_case : Dict = FLAX_MODEL_FOR_PRETRAINING_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForPreTraining, head_doc="""pretraining""")
class UpperCamelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__snake_case : Optional[Any] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForCausalLM, head_doc="""causal language modeling""")
class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    # Dispatches from_config/from_pretrained on config type via the masked-LM mapping.
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeqaSeqLM(_BaseAutoModelClass):
    # Sequence-to-sequence (encoder-decoder) language-modeling heads.
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeqaSeqLM = auto_class_update(
    FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVisionaSeq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVisionaSeq = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeqaSeq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeqaSeq = auto_class_update(
    FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling"
)
| 296 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _A(PipelineTesterMixin, unittest.TestCase):
    """Fast (CPU, dummy-weight) tests for the Kandinsky 2.2 image-to-image pipeline."""

    pipeline_class = KandinskyVaaImgaImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        """A tiny UNet with the Kandinsky 2.2 decoder layout, seeded for determinism."""
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNetaDConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        """Assemble the minimal {unet, scheduler, movq} dict the pipeline expects."""
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler_kwargs = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**scheduler_kwargs)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic call kwargs (embeddings, init image, generator) for `device`."""
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        # torch.Generator does not support the "mps" device; seed globally there.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class _A(unittest.TestCase):
    """GPU integration test against the published Kandinsky 2.2 checkpoints."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyVaaImgaImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 351 |
"""simple docstring"""
from __future__ import annotations
class _A:
    """A node of a singly linked list."""

    def __init__(self, data=None):
        self.data = data
        self.next = None  # next node in the list; None at the tail

    def __repr__(self):
        """Return the list from this node onward as 'a->b->c'."""
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list):
    """Build a linked list from `elements_list` and return its head node.

    Raises:
        Exception: if `elements_list` is empty.
    """
    if not elements_list:
        raise Exception("The Elements List is empty")

    # Keep `head` fixed on the first node while `current` advances to append.
    current = head = _A(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = _A(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node):
    """Recursively print each node's data in reverse list order."""
    if head_node is not None and isinstance(head_node, _A):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    """Run the doctests, then demo list construction and reversed printing."""
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)


if __name__ == "__main__":
    main()
| 226 | 0 |
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
SCREAMING_SNAKE_CASE__ : int = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class UpperCamelCase__(TrainingArguments):
    """
    TrainingArguments extended for sequence-to-sequence evaluation with `generate`.

    Adds sortish sampling, `predict_with_generate`, per-eval generation length/beam
    overrides, and an optional GenerationConfig source.
    """

    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        """Serialize to a plain dict, expanding any nested GenerationConfig values."""
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
| 48 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """Return (timm_key, hf_key) pairs mapping a timm DeiT state dict to ours.

    Args:
        config: a DeiT-style config providing `num_hidden_layers`.
        base_model: if True, emit keys for the headless base model (no "deit." prefix,
            pooler instead of classification heads).
    """
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "deit.embeddings.cls_token"),
            ("dist_token", "deit.embeddings.distillation_token"),
            ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "deit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
    else:
        # layernorm + classification heads
        rename_keys.extend(
            [
                ("norm.weight", "deit.layernorm.weight"),
                ("norm.bias", "deit.layernorm.bias"),
                ("head.weight", "cls_classifier.weight"),
                ("head.bias", "cls_classifier.bias"),
                ("head_dist.weight", "distillation_classifier.weight"),
                ("head_dist.bias", "distillation_classifier.bias"),
            ]
        )

    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each timm fused qkv projection into separate HF query/key/value entries.

    Mutates `state_dict` in place: pops `blocks.{i}.attn.qkv.*` and inserts the
    corresponding `...attention.attention.{query,key,value}.*` tensors.
    """
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place."""
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Download the standard COCO verification image (two cats on a couch)."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak a timm DeiT checkpoint into our DeiT structure, verify the
    converted model matches the timm reference on a test image, then save model
    and image processor to `pytorch_dump_folder_path`.
    """
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # timm names encode geometry, e.g. "vit_deit_base_distilled_patch16_224"
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass  # DeiTConfig defaults already describe the base architecture
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--deit_name",
        default="vit_deit_base_distilled_patch16_224",
        type=str,
        help="Name of the DeiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 48 | 1 |
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
__snake_case = '''The Nymphenburg Palace is a beautiful palace in Munich!'''
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path, pytorch_dump_folder_path):
    """
    Convert the original (Gluon/MXNet) Bort checkpoint at `bort_checkpoint_path`
    into a HuggingFace PyTorch BertForMaskedLM checkpoint saved under
    `pytorch_dump_folder_path`, then sanity-check that both models produce the
    same hidden states on a sample sentence.
    """
    # Hyper-parameters of the published Bort checkpoint
    # (4 layers, 8 heads, hidden 768, embedding 1024).
    bort_4_8_768_1024_hparams = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }

    predefined_args = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()

    # Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }

    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()

    # Parameter mapping table (Gluonnlp to Transformers), * denotes layer index:
    #   encoder.layer_norm.{beta,gamma}            -> bert.embeddings.LayerNorm.{bias,weight}
    #   encoder.position_weight                    -> bert.embeddings.position_embeddings.weight
    #   word_embed.0.weight                        -> bert.embeddings.word_embeddings.weight
    #   ...transformer_cells.*.attention_cell.proj_{key,query,value}.{bias,weight}
    #                                              -> ...layer.*.attention.self.{key,query,value}.{bias,weight}
    #   ...transformer_cells.*.proj.{bias,weight}  -> ...layer.*.attention.output.dense.{bias,weight}
    #   ...transformer_cells.*.layer_norm.{beta,gamma}
    #                                              -> ...layer.*.attention.output.LayerNorm.{bias,weight}
    #   ...transformer_cells.*.ffn.ffn_1.{bias,weight}
    #                                              -> ...layer.*.intermediate.dense.{bias,weight}
    #   ...transformer_cells.*.ffn.ffn_2.{bias,weight}
    #                                              -> ...layer.*.output.dense.{bias,weight}
    #   ...transformer_cells.*.ffn.layer_norm.{beta,gamma}
    #                                              -> ...layer.*.output.LayerNorm.{bias,weight}

    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"

        return gluon_param

    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )

    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output

        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )

    # Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
    # Sample sentence used to verify the conversion (inlined so the function is
    # self-contained).
    sample_text = "The Nymphenburg Palace is a beautiful palace in Munich!"

    input_ids = tokenizer.encode_plus(sample_text)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(sample_text, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both model do output the same tensors")
    else:
        print("❌ Both model do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
class __snake_case:
    """Fixed-capacity circular (ring) queue backed by a plain list."""

    def __init__(self, n):
        self.n = n  # capacity
        self.array = [None] * self.n  # backing storage
        self.front = 0  # index of the first element
        self.rear = 0  # index one past the last element (next write slot)
        self.size = 0  # number of elements currently stored

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        """Peek at the front element; returns False when the queue is empty."""
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        """Append `data` at the rear; raises when the queue is at capacity.

        Returns self so calls can be chained.
        """
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")

        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        """Remove and return the front element; raises on an empty queue."""
        if self.size == 0:
            raise Exception("UNDERFLOW")

        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
| 78 | 1 |
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig(SageMakerConfig):
    """Static SageMakerConfig fixture used to exercise `_convert_nargs_to_dict`."""

    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    # Well-formed "--flag value" pairs (booleans given explicit values).
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    # Malformed: bare flags mixed with values, which the parser must reject.
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
class a__ ( unittest.TestCase ):
    def __SCREAMING_SNAKE_CASE( self ):
        """Nargs-style training-script args convert to a typed dict; mixed
        flag/value usage is rejected.

        Fix: the converted dict was assigned to ``__lowerCAmelCase`` but read
        as ``converted_args`` (NameError), and the ``isinstance`` targets were
        the undefined placeholder ``SCREAMING_SNAKE_CASE__`` — replaced with
        the concrete types implied by the fixture values ("bert" -> str,
        "False" -> bool, "3" -> int, "5e-5"/"50.5" -> float).
        """
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
        assert isinstance(converted_args["model_name_or_path"] , str )
        assert isinstance(converted_args["do_train"] , bool )
        assert isinstance(converted_args["epochs"] , int )
        assert isinstance(converted_args["learning_rate"] , float )
        assert isinstance(converted_args["max_steps"] , float )

        # Bare flags interleaved with explicit values cannot be parsed.
        with pytest.raises(ValueError ):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
| 92 |
'''simple docstring'''
import math
def A_ ( snake_case , snake_case ):
if initial_intensity < 0:
raise ValueError("The value of intensity cannot be negative" )
# handling of negative values of initial intensity
if angle < 0 or angle > 360:
raise ValueError("In Malus Law, the angle is in the range 0-360 degrees" )
# handling of values out of allowed range
return initial_intensity * (math.cos(math.radians(snake_case ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name="malus_law")
| 139 | 0 |
'''simple docstring'''
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class _UpperCamelCase ( A ):
    """Fast-tokenizer test double that points an inherited class attribute at
    the project-local ``CustomTokenizer``.

    NOTE(review): the attribute name ``lowerCAmelCase__`` matches no
    transformers hook — presumably it was ``slow_tokenizer_class`` before an
    automated rename; confirm against the original file.
    """

    lowerCAmelCase__ = CustomTokenizer
    pass
| 48 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
# Number of output labels per GLUE task; "sts-b" is a regression task and
# therefore has a single target value.
lowerCamelCase = {
    """cola""": 2,
    """mnli""": 3,
    """mrpc""": 2,
    """sst-2""": 2,
    """sts-b""": 1,
    """qqp""": 2,
    """qnli""": 2,
    """rte""": 2,
    """wnli""": 2,
}

# Emit INFO-level logs while converting.
logging.set_verbosity_info()
def _A ( tf_checkpoint_path , bert_config_file , pytorch_dump_folder_path , finetuning_task=None ):
    """Convert a TensorFlow XLNet checkpoint to PyTorch and save weights + config.

    The instantiated head depends on *finetuning_task*: a GLUE task yields
    XLNetForSequenceClassification, a SQuAD-style task yields
    XLNetForQuestionAnswering, anything else the plain LM head.

    Fix: the original declared four parameters all named ``_lowerCAmelCase``
    (a SyntaxError) and collapsed every local to ``__lowercase``; names here
    follow the CLI flags in the ``__main__`` section and the imported
    WEIGHTS_NAME/CONFIG_NAME constants.
    """
    config = XLNetConfig.from_json_file(bert_config_file )

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ''
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" )
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config )
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config )
    else:
        model = XLNetLMHeadModel(config )

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model , config , tf_checkpoint_path )

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path , CONFIG_NAME )
    print(f"""Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}""" )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(f"""Save configuration file to {os.path.abspath(pytorch_config_dump_path )}""" )
    with open(pytorch_config_dump_path , 'w' , encoding='utf-8' ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    # CLI entry point for the TF->PyTorch XLNet conversion above.
    # Fix: parser/args were both bound to `lowerCamelCase` while later lines
    # read `parser`/`args`, and the call targeted the undefined name
    # `convert_xlnet_checkpoint_to_pytorch` (the function in this file is `_A`).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
    )
    parser.add_argument(
        """--xlnet_config_file""",
        default=None,
        type=str,
        required=True,
        help=(
            """The config json file corresponding to the pre-trained XLNet model. \n"""
            """This specifies the model architecture."""
        ),
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""",
        default=None,
        type=str,
        required=True,
        help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
    )
    parser.add_argument(
        """--finetuning_task""",
        default=None,
        type=str,
        help="""Name of a task on which the XLNet TensorFlow model was fine-tuned""",
    )
    args = parser.parse_args()
    print(args)

    _A(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
| 48 | 1 |
"""simple docstring"""
from collections import Counter
from timeit import timeit
def a_ ( _lowerCAmelCase : str = "" , ):
    """Return True when the non-space, case-folded characters of the input can
    be rearranged into a palindrome (at most one character with an odd count).

    Fix: the body read the undefined name ``input_str`` instead of the
    parameter ``_lowerCAmelCase``.
    """
    # Each term of the sum is 1 for a character with an odd count, 0 otherwise.
    return sum(c % 2 for c in Counter(_lowerCAmelCase.replace(' ' , '' ).lower() ).values() ) < 2
def a_ ( _lowerCAmelCase : str = "" ):
    """Manual frequency-count variant of the palindrome-rearrangement check.

    Returns True when (ignoring spaces and case) at most one character of the
    input occurs an odd number of times.

    Fix: locals were collapsed by an automated rename — the lowered string and
    the frequency dict were assigned to ``lowercase__`` but read back under
    their original names (``lower_case_input_str``, ``character_freq_dict``).
    """
    if len(_lowerCAmelCase ) == 0:
        return True
    lower_case_input_str = _lowerCAmelCase.replace(' ' , '' ).lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict[str, int] = {}

    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character , 0 ) + 1
    odd_char = 0

    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True
def a_ ( _lowerCAmelCase : str = "" ):
    """Print a timing comparison of the two palindrome-rearrangement checks.

    NOTE(review): the timeit setup imports ``__main__ as z`` and reads
    ``z.check_str`` plus functions named
    ``can_string_be_rearranged_as_palindrome*`` — neither exists in this module
    as written (every def here is named ``a_``), so this only worked before an
    automated rename; confirm against the original file. Only meaningful when
    run as a script.
    """
    print('\nFor string = ' , _lowerCAmelCase , ':' )
    print(
        '> can_string_be_rearranged_as_palindrome_counter()' , '\tans =' , can_string_be_rearranged_as_palindrome_counter(_lowerCAmelCase ) , '\ttime =' , timeit(
            'z.can_string_be_rearranged_as_palindrome_counter(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
    print(
        '> can_string_be_rearranged_as_palindrome()' , '\tans =' , can_string_be_rearranged_as_palindrome(_lowerCAmelCase ) , '\ttime =' , timeit(
            'z.can_string_be_rearranged_as_palindrome(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
if __name__ == "__main__":
    # Interactive driver: read a string, benchmark both implementations and
    # report the verdict.
    # NOTE(review): ``benchmark`` and
    # ``can_string_be_rearranged_as_palindrome_counter`` are not defined under
    # those names in this module (all defs are ``a_``), and ``check_str`` /
    # ``status`` are read while the assignments target ``_UpperCamelCase`` —
    # an automated rename collapsed the names; confirm against the original.
    _UpperCamelCase : int = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    _UpperCamelCase : List[Any] = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f'''{check_str} can {"" if status else "not "}be rearranged as a palindrome''')
| 77 |
"""simple docstring"""
def _A ( number , position ):
    """Return *number* with the bit at index *position* set to 1.

    Fix: both parameters were named ``lowercase`` (a SyntaxError); the body
    reads ``number``/``position``.
    """
    return number | (1 << position)
def _A ( number , position ):
    """Return *number* with the bit at index *position* cleared to 0.

    Fix: both parameters were named ``lowercase`` (a SyntaxError); the body
    reads ``number``/``position``.
    """
    return number & ~(1 << position)
def _A ( number , position ):
    """Return *number* with the bit at index *position* flipped.

    Fix: both parameters were named ``lowercase`` (a SyntaxError); the body
    reads ``number``/``position``.
    """
    return number ^ (1 << position)
def _A ( number , position ):
    """Return True when the bit of *number* at index *position* is 1.

    Fix: both parameters were named ``lowercase`` (a SyntaxError); the body
    reads ``number``/``position``.
    """
    return ((number >> position) & 1) == 1
def _A ( number , position ):
    """Return the bit of *number* at index *position* as an int (0 or 1).

    Fix: both parameters were named ``lowercase`` (a SyntaxError); the body
    reads ``number``/``position``.
    """
    return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
    # Run the doctests of this module when executed directly.
    # Fix: the testmod() line carried trailing dataset residue ("| 81 | 0 |"),
    # making it a syntax error.
    import doctest

    doctest.testmod()
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def __lowercase ( snake_case_ ) ->int:
    """Return the number of trainable (``requires_grad``) parameters of the
    torch module *snake_case_*.

    Fix: the filter lambda declared a parameter named ``snake_case_`` but its
    body read the free name ``p``, and the filtered iterator / module were
    referenced via the undefined names ``model_parameters`` / ``model``.
    """
    model_parameters = filter(lambda p : p.requires_grad , snake_case_.parameters() )
    params = int(sum(np.prod(p.size() ) for p in model_parameters ) )
    return params
a_ = logging.getLogger(__name__)
def __lowercase ( snake_case_ , metric ):
    """Build a ``ModelCheckpoint`` keeping the 3 best checkpoints for
    ``val_<metric>`` under the directory *snake_case_*.

    Supported metrics: rouge2, bleu, em.

    Fix: the original declared both parameters as ``snake_case_`` (a
    SyntaxError — the body reads ``metric``), bound the filename pattern and
    the callback to throwaway names, and returned the undefined
    ``checkpoint_callback``.
    """
    if metric == "rouge2":
        exp = '''{val_avg_rouge2:.4f}-{step_count}'''
    elif metric == "bleu":
        exp = '''{val_avg_bleu:.4f}-{step_count}'''
    elif metric == "em":
        exp = '''{val_avg_em:.4f}-{step_count}'''
    else:
        raise NotImplementedError(
            f"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
            ''' function.''' )

    checkpoint_callback = ModelCheckpoint(
        dirpath=snake_case_ ,filename=exp ,monitor=f"""val_{metric}""" ,mode='''max''' ,save_top_k=3 ,every_n_epochs=1 ,)
    return checkpoint_callback
def __lowercase ( metric , snake_case_ ):
    """Build an ``EarlyStopping`` callback on ``val_<metric>`` with patience
    *snake_case_* (mode ``min`` for loss-like metrics, ``max`` otherwise).

    Fix: the original declared both parameters as ``snake_case_`` (a
    SyntaxError) while the body reads ``metric``. ``verbose`` is still fed the
    patience value, as in the original — TODO confirm the intended flag.
    """
    return EarlyStopping(
        monitor=f"""val_{metric}""" ,mode='''min''' if '''loss''' in metric else '''max''' ,patience=snake_case_ ,verbose=snake_case_ ,)
class __snake_case ( pl.Callback ):
    """Lightning callback that logs per-group learning rates, writes test/val
    metrics and generations into the run's output dir, and reports parameter
    counts.

    NOTE(review): several hooks below declare two parameters both named
    ``__lowerCamelCase`` (a SyntaxError in Python) and assign results to the
    throwaway name ``__A`` while later lines read the original variable names
    (``metrics``, ``od``, ``results_file``, ``val``, ``npars``, ...). An
    automated rename collapsed the names; bodies are kept byte-for-byte.
    """

    def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase ):
        """Log the learning rate of each param group of the first optimizer."""
        __A : str = {F"""lr_group_{i}""": param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
        pl_module.logger.log_metrics(__lowerCamelCase )

    @rank_zero_only
    def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=True ):
        """Write callback metrics (and optionally generations) for *type_path*
        into the hparams output dir; rank-zero only."""
        logger.info(F"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
        __A : List[Any] = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']} )
        # Log results
        __A : str = Path(pl_module.hparams.output_dir )
        if type_path == "test":
            __A : str = od / '''test_results.txt'''
            __A : Optional[Any] = od / '''test_generations.txt'''
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            __A : List[str] = od / F"""{type_path}_results/{trainer.global_step:05d}.txt"""
            __A : List[Any] = od / F"""{type_path}_generations/{trainer.global_step:05d}.txt"""
        results_file.parent.mkdir(exist_ok=__lowerCamelCase )
        generations_file.parent.mkdir(exist_ok=__lowerCamelCase )
        with open(__lowerCamelCase , '''a+''' ) as writer:
            for key in sorted(__lowerCamelCase ):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                __A : int = metrics[key]
                if isinstance(__lowerCamelCase , torch.Tensor ):
                    __A : Dict = val.item()
                __A : int = F"""{key}: {val:.6f}\n"""
                writer.write(__lowerCamelCase )
        if not save_generations:
            return
        if "preds" in metrics:
            __A : str = '''\n'''.join(metrics['''preds'''] )
            generations_file.open('''w+''' ).write(__lowerCamelCase )

    @rank_zero_only
    def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase ):
        """Log total and trainable parameter counts (in millions); rank-zero only."""
        try:
            __A : Optional[int] = pl_module.model.model.num_parameters()
        except AttributeError:
            __A : str = pl_module.model.num_parameters()
        __A : Optional[Any] = count_trainable_parameters(__lowerCamelCase )
        # mp stands for million parameters
        trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1e6, '''grad_mp''': n_trainable_pars / 1e6} )

    @rank_zero_only
    def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase ):
        """On test end: persist metrics.json and write the test logs."""
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        return self._write_logs(__lowerCamelCase , __lowerCamelCase , '''test''' )

    @rank_zero_only
    def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase ):
        """On validation end: persist metrics.json (generations intentionally
        not written — see the commented-out call)."""
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 364 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy import table for the Bloom model family: heavy submodules load on first
# attribute access (or eagerly under TYPE_CHECKING), and optional backends
# (tokenizers, torch) are skipped when their dependency is missing.
# NOTE(review): the optional branches rebind the bare name ``a_`` instead of
# extending the import-structure dict, and the final ``_LazyModule`` call reads
# the undefined ``_import_structure`` — an automated rename scrambled the
# variable names; confirm against the original file.
a_ = {
    """configuration_bloom""": ["""BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BloomConfig""", """BloomOnnxConfig"""],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ = ["""BloomTokenizerFast"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ = [
        """BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """BloomForCausalLM""",
        """BloomModel""",
        """BloomPreTrainedModel""",
        """BloomForSequenceClassification""",
        """BloomForTokenClassification""",
        """BloomForQuestionAnswering""",
    ]

if TYPE_CHECKING:
    # Eager imports for static type checkers only; never executed at runtime.
    from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bloom_fast import BloomTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bloom import (
            BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
            BloomForCausalLM,
            BloomForQuestionAnswering,
            BloomForSequenceClassification,
            BloomForTokenClassification,
            BloomModel,
            BloomPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules import on demand.
    a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 291 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import table for the Whisper model family; optional backends
# (tokenizers, torch, tensorflow, flax) degrade gracefully when unavailable.
# NOTE(review): the optional branches rebind the throwaway name ``__A`` instead
# of extending the import-structure dict, and the final ``_LazyModule`` call
# reads the undefined ``_import_structure`` — an automated rename scrambled the
# variable names; confirm against the original file.
__A : Tuple = {
    '''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
    '''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
    '''processing_whisper''': ['''WhisperProcessor'''],
    '''tokenization_whisper''': ['''WhisperTokenizer'''],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A : List[Any] = ['''WhisperTokenizerFast''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A : Optional[int] = [
        '''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''WhisperForConditionalGeneration''',
        '''WhisperModel''',
        '''WhisperPreTrainedModel''',
        '''WhisperForAudioClassification''',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A : Any = [
        '''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFWhisperForConditionalGeneration''',
        '''TFWhisperModel''',
        '''TFWhisperPreTrainedModel''',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A : List[Any] = [
        '''FlaxWhisperForConditionalGeneration''',
        '''FlaxWhisperModel''',
        '''FlaxWhisperPreTrainedModel''',
        '''FlaxWhisperForAudioClassification''',
    ]

if TYPE_CHECKING:
    # Eager imports for static type checkers only; never executed at runtime.
    from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
    from .feature_extraction_whisper import WhisperFeatureExtractor
    from .processing_whisper import WhisperProcessor
    from .tokenization_whisper import WhisperTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_whisper_fast import WhisperTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_whisper import (
            WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            WhisperForAudioClassification,
            WhisperForConditionalGeneration,
            WhisperModel,
            WhisperPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_whisper import (
            TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWhisperForConditionalGeneration,
            TFWhisperModel,
            TFWhisperPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_whisper import (
            FlaxWhisperForAudioClassification,
            FlaxWhisperForConditionalGeneration,
            FlaxWhisperModel,
            FlaxWhisperPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules import on demand.
    __A : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 260 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class UpperCAmelCase_ ( UpperCamelCase_ ):
    """ControlNet output container: JAX arrays for the per-down-block residuals
    and the mid-block residual.

    NOTE(review): both fields are declared under the same name
    ``UpperCamelCase__`` (the second clobbers the first) — presumably
    ``down_block_res_samples`` / ``mid_block_res_sample`` before an automated
    rename; confirm upstream.
    """

    UpperCamelCase__ : jnp.ndarray
    UpperCamelCase__ : jnp.ndarray
class UpperCAmelCase_ ( nn.Module ):
    """Embeds the ControlNet conditioning image into the UNet's first feature
    resolution: a conv-in, a stack of silu+conv (stride-2) downsamplers, and a
    zero-initialized conv-out so the ControlNet starts as a no-op.

    NOTE(review): intermediate results are all bound to
    ``__SCREAMING_SNAKE_CASE`` while later lines read ``blocks``/``embedding``
    etc. — an automated rename collapsed the names; bodies kept byte-for-byte.
    """

    UpperCamelCase__ : int
    UpperCamelCase__ : Tuple[int] = (16, 32, 96, 256)
    UpperCamelCase__ : jnp.dtype = jnp.floataa

    def _A ( self ):
        """Build conv_in, the downsampling conv stack and the zero-init conv_out."""
        __SCREAMING_SNAKE_CASE = nn.Conv(
            self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        __SCREAMING_SNAKE_CASE = []
        for i in range(len(self.block_out_channels ) - 1 ):
            __SCREAMING_SNAKE_CASE = self.block_out_channels[i]
            __SCREAMING_SNAKE_CASE = self.block_out_channels[i + 1]
            __SCREAMING_SNAKE_CASE = nn.Conv(
                _A , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
            blocks.append(_A )
            __SCREAMING_SNAKE_CASE = nn.Conv(
                _A , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
            blocks.append(_A )
        __SCREAMING_SNAKE_CASE = blocks
        # Zero init keeps the conditioning contribution at exactly 0 initially.
        __SCREAMING_SNAKE_CASE = nn.Conv(
            self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )

    def __call__( self , _A ):
        """conv-in -> silu -> (conv -> silu)* -> zero-init conv-out."""
        __SCREAMING_SNAKE_CASE = self.conv_in(_A )
        __SCREAMING_SNAKE_CASE = nn.silu(_A )
        for block in self.blocks:
            __SCREAMING_SNAKE_CASE = block(_A )
            __SCREAMING_SNAKE_CASE = nn.silu(_A )
        __SCREAMING_SNAKE_CASE = self.conv_out(_A )
        return embedding
@flax_register_to_config
class UpperCAmelCase_ ( nn.Module , UpperCamelCase_ , UpperCamelCase_ ):
    """Flax ControlNet: a copy of the UNet encoder plus zero-initialized
    projection convs whose outputs are added as residuals to the main UNet.

    NOTE(review): every config field below is declared under the same name
    ``UpperCamelCase__`` (only the last survives), locals are collapsed to
    ``__SCREAMING_SNAKE_CASE`` while later lines read the original names, and
    ``__call__`` declares several parameters all named ``_A`` — a SyntaxError
    as written. An automated rename collapsed the original names (sample_size,
    in_channels, down_block_types, ...); bodies kept byte-for-byte.
    """

    UpperCamelCase__ : int = 32
    UpperCamelCase__ : int = 4
    UpperCamelCase__ : Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    UpperCamelCase__ : Union[bool, Tuple[bool]] = False
    UpperCamelCase__ : Tuple[int] = (320, 640, 1280, 1280)
    UpperCamelCase__ : int = 2
    UpperCamelCase__ : Union[int, Tuple[int]] = 8
    UpperCamelCase__ : Optional[Union[int, Tuple[int]]] = None
    UpperCamelCase__ : int = 1280
    UpperCamelCase__ : float = 0.0
    UpperCamelCase__ : bool = False
    UpperCamelCase__ : jnp.dtype = jnp.floataa
    UpperCamelCase__ : bool = True
    UpperCamelCase__ : int = 0
    UpperCamelCase__ : str = "rgb"
    UpperCamelCase__ : Tuple[int] = (16, 32, 96, 256)

    def _A ( self , _A ):
        """Initialize parameters from dummy inputs using a split PRNG key."""
        __SCREAMING_SNAKE_CASE = (1, self.in_channels, self.sample_size, self.sample_size)
        __SCREAMING_SNAKE_CASE = jnp.zeros(_A , dtype=jnp.floataa )
        __SCREAMING_SNAKE_CASE = jnp.ones((1,) , dtype=jnp.intaa )
        __SCREAMING_SNAKE_CASE = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
        __SCREAMING_SNAKE_CASE = (1, 3, self.sample_size * 8, self.sample_size * 8)
        __SCREAMING_SNAKE_CASE = jnp.zeros(_A , dtype=jnp.floataa )
        __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = jax.random.split(_A )
        __SCREAMING_SNAKE_CASE = {'params': params_rng, 'dropout': dropout_rng}
        return self.init(_A , _A , _A , _A , _A )["params"]

    def _A ( self ):
        """Build conv_in, time embedding, conditioning embedding, the down
        blocks and the zero-initialized controlnet projection convs."""
        __SCREAMING_SNAKE_CASE = self.block_out_channels
        __SCREAMING_SNAKE_CASE = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        __SCREAMING_SNAKE_CASE = self.num_attention_heads or self.attention_head_dim

        # input
        __SCREAMING_SNAKE_CASE = nn.Conv(
            block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )

        # time
        __SCREAMING_SNAKE_CASE = FlaxTimesteps(
            block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
        __SCREAMING_SNAKE_CASE = FlaxTimestepEmbedding(_A , dtype=self.dtype )

        __SCREAMING_SNAKE_CASE = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )

        __SCREAMING_SNAKE_CASE = self.only_cross_attention
        if isinstance(_A , _A ):
            __SCREAMING_SNAKE_CASE = (only_cross_attention,) * len(self.down_block_types )

        if isinstance(_A , _A ):
            __SCREAMING_SNAKE_CASE = (num_attention_heads,) * len(self.down_block_types )

        # down
        __SCREAMING_SNAKE_CASE = []
        __SCREAMING_SNAKE_CASE = []

        __SCREAMING_SNAKE_CASE = block_out_channels[0]
        # Zero-init projection: the ControlNet contributes nothing at step 0.
        __SCREAMING_SNAKE_CASE = nn.Conv(
            _A , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
        controlnet_down_blocks.append(_A )

        for i, down_block_type in enumerate(self.down_block_types ):
            __SCREAMING_SNAKE_CASE = output_channel
            __SCREAMING_SNAKE_CASE = block_out_channels[i]
            __SCREAMING_SNAKE_CASE = i == len(_A ) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                __SCREAMING_SNAKE_CASE = FlaxCrossAttnDownBlockaD(
                    in_channels=_A , out_channels=_A , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
            else:
                __SCREAMING_SNAKE_CASE = FlaxDownBlockaD(
                    in_channels=_A , out_channels=_A , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )

            down_blocks.append(_A )

            for _ in range(self.layers_per_block ):
                __SCREAMING_SNAKE_CASE = nn.Conv(
                    _A , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
                controlnet_down_blocks.append(_A )

            if not is_final_block:
                __SCREAMING_SNAKE_CASE = nn.Conv(
                    _A , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
                controlnet_down_blocks.append(_A )

        __SCREAMING_SNAKE_CASE = down_blocks
        __SCREAMING_SNAKE_CASE = controlnet_down_blocks

        # mid
        __SCREAMING_SNAKE_CASE = block_out_channels[-1]
        __SCREAMING_SNAKE_CASE = FlaxUNetMidBlockaDCrossAttn(
            in_channels=_A , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )

        __SCREAMING_SNAKE_CASE = nn.Conv(
            _A , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )

    def __call__( self , _A , _A , _A , _A , _A = 1.0 , _A = True , _A = False , ):
        """Forward pass: embed time + conditioning, run the down/mid blocks,
        project through the zero-init convs and scale by conditioning_scale.

        NOTE(review): duplicated ``_A`` parameter names — SyntaxError as
        written; presumably (sample, timesteps, encoder_hidden_states,
        controlnet_cond, conditioning_scale, return_dict, train) upstream.
        """
        __SCREAMING_SNAKE_CASE = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            __SCREAMING_SNAKE_CASE = jnp.flip(_A , axis=1 )

        # 1. time
        if not isinstance(_A , jnp.ndarray ):
            __SCREAMING_SNAKE_CASE = jnp.array([timesteps] , dtype=jnp.intaa )
        elif isinstance(_A , jnp.ndarray ) and len(timesteps.shape ) == 0:
            __SCREAMING_SNAKE_CASE = timesteps.astype(dtype=jnp.floataa )
            __SCREAMING_SNAKE_CASE = jnp.expand_dims(_A , 0 )

        __SCREAMING_SNAKE_CASE = self.time_proj(_A )
        __SCREAMING_SNAKE_CASE = self.time_embedding(_A )

        # 2. pre-process
        __SCREAMING_SNAKE_CASE = jnp.transpose(_A , (0, 2, 3, 1) )
        __SCREAMING_SNAKE_CASE = self.conv_in(_A )

        __SCREAMING_SNAKE_CASE = jnp.transpose(_A , (0, 2, 3, 1) )
        __SCREAMING_SNAKE_CASE = self.controlnet_cond_embedding(_A )
        sample += controlnet_cond

        # 3. down
        __SCREAMING_SNAKE_CASE = (sample,)
        for down_block in self.down_blocks:
            if isinstance(_A , _A ):
                __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = down_block(_A , _A , _A , deterministic=not train )
            else:
                __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = down_block(_A , _A , deterministic=not train )
            down_block_res_samples += res_samples

        # 4. mid
        __SCREAMING_SNAKE_CASE = self.mid_block(_A , _A , _A , deterministic=not train )

        # 5. contronet blocks
        __SCREAMING_SNAKE_CASE = ()
        for down_block_res_sample, controlnet_block in zip(_A , self.controlnet_down_blocks ):
            __SCREAMING_SNAKE_CASE = controlnet_block(_A )
            controlnet_down_block_res_samples += (down_block_res_sample,)

        __SCREAMING_SNAKE_CASE = controlnet_down_block_res_samples

        __SCREAMING_SNAKE_CASE = self.controlnet_mid_block(_A )

        # 6. scaling
        __SCREAMING_SNAKE_CASE = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=_A , mid_block_res_sample=_A )
| 257 | 0 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def __UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
_lowerCAmelCase = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" , type=lowerCamelCase__ , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" , type=lowerCamelCase__ , help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) , )
# rest from the training program
parser.add_argument("""training_script_args""" , nargs=lowerCamelCase__ )
return parser.parse_args()
def __UpperCAmelCase ( ) -> int:
    """Launcher entry point: parse the CLI, import the training script as a
    module, patch ``sys.argv`` and spawn ``_mp_fn`` on the requested TPU cores.

    NOTE(review): every local is assigned to ``_lowerCAmelCase`` while later
    lines read ``args``/``script_fpath``/``mod``, and ``parse_args``/``main``
    are not defined under those names in this module (both defs are
    ``__UpperCAmelCase``) — an automated rename collapsed the names; confirm
    against the original file.
    """
    _lowerCAmelCase = parse_args()

    # Import training_script as a module.
    _lowerCAmelCase = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    _lowerCAmelCase = script_fpath.stem
    _lowerCAmelCase = importlib.import_module(lowerCamelCase__ )

    # Patch sys.argv
    _lowerCAmelCase = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]

    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )


if __name__ == "__main__":
    main()
"""simple docstring"""
from functools import reduce
# 1000-digit series from Project Euler problem 8.
SCREAMING_SNAKE_CASE : str = (
    '''73167176531330624919225119674426574742355349194934'''
    '''96983520312774506326239578318016984801869478851843'''
    '''85861560789112949495459501737958331952853208805511'''
    '''12540698747158523863050715693290963295227443043557'''
    '''66896648950445244523161731856403098711121722383113'''
    '''62229893423380308135336276614282806444486645238749'''
    '''30358907296290491560440772390713810515859307960866'''
    '''70172427121883998797908792274921901699720888093776'''
    '''65727333001053367881220235421809751254540594752243'''
    '''52584907711670556013604839586446706324415722155397'''
    '''53697817977846174064955149290862569321978468622482'''
    '''83972241375657056057490261407972968652414535100474'''
    '''82166370484403199890008895243450658541227588666881'''
    '''16427171479924442928230863465674813919123162824586'''
    '''17866458359124566529476545682848912883142607690042'''
    '''24219022671055626321111109370544217506941658960408'''
    '''07198403850962455444362981230987879927244284909188'''
    '''84580156166097919133875499200524063689912560717606'''
    '''05886116467109405077541002256983155200055935729725'''
    '''71636269561882670428252483600823257530420752963450'''
)


def __UpperCAmelCase ( snake_case_ : str = SCREAMING_SNAKE_CASE ) -> int:
    """Return the greatest product of 13 adjacent digits in *snake_case_*.

    Fix: the original reduce lambda declared two parameters with the same name
    (a SyntaxError), sliced the undefined name ``n``, and the default referred
    to an undefined ``N``.
    """
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x , y : str(int(x ) * int(y ) ) , snake_case_[i : i + 13] ) )
        for i in range(len(snake_case_ ) - 12 ) )


if __name__ == "__main__":
    # Fix: the original printed `solution()`, which is not defined here.
    print(f'{__UpperCAmelCase() = }')
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__: Union[str, Any] = logging.get_logger(__name__)
# Canonical hub config URLs for the pretrained REALM checkpoints.
# Fixes: the "cc-news-pretrained-openqa" URL contained "/aresolve/" instead of
# "/resolve/", and the `Dict` annotation was undefined at module scope (typing
# is not imported in this module).
A__: dict = {
    '''google/realm-cc-news-pretrained-embedder''': (
        '''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'''
    ),
    '''google/realm-cc-news-pretrained-encoder''': (
        '''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'''
    ),
    '''google/realm-cc-news-pretrained-scorer''': (
        '''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'''
    ),
    '''google/realm-cc-news-pretrained-openqa''': (
        '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'''
    ),
    '''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json''',
    '''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json''',
    '''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json''',
    '''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json''',
    # See all REALM models at https://huggingface.co/models?filter=realm
}
class A__ ( UpperCAmelCase__ ):
    """Configuration for REALM models (embedder / encoder / scorer / reader /
    open-QA), grouping common transformer, reader and retrieval settings.

    NOTE(review): ``__init__`` declares every parameter under the single name
    ``SCREAMING_SNAKE_CASE`` (a SyntaxError) and the attribute assignments bind
    the throwaway name ``_a`` while reading the original parameter names
    (``vocab_size``, ``hidden_size``, ...) — an automated rename collapsed the
    names; the body is kept byte-for-byte.
    """

    # model_type string used by AutoConfig dispatch.
    __UpperCamelCase : Tuple = "realm"

    def __init__( self :Any , SCREAMING_SNAKE_CASE :List[Any]=3_0_5_2_2 , SCREAMING_SNAKE_CASE :Optional[Any]=7_6_8 , SCREAMING_SNAKE_CASE :List[str]=1_2_8 , SCREAMING_SNAKE_CASE :Any=1_2 , SCREAMING_SNAKE_CASE :Any=1_2 , SCREAMING_SNAKE_CASE :Optional[Any]=8 , SCREAMING_SNAKE_CASE :Tuple=3_0_7_2 , SCREAMING_SNAKE_CASE :Tuple="gelu_new" , SCREAMING_SNAKE_CASE :Optional[int]=0.1 , SCREAMING_SNAKE_CASE :int=0.1 , SCREAMING_SNAKE_CASE :List[str]=5_1_2 , SCREAMING_SNAKE_CASE :List[Any]=2 , SCREAMING_SNAKE_CASE :Optional[Any]=0.02 , SCREAMING_SNAKE_CASE :Union[str, Any]=1e-12 , SCREAMING_SNAKE_CASE :Optional[int]=2_5_6 , SCREAMING_SNAKE_CASE :Optional[int]=1_0 , SCREAMING_SNAKE_CASE :List[Any]=1e-3 , SCREAMING_SNAKE_CASE :int=5 , SCREAMING_SNAKE_CASE :Optional[Any]=3_2_0 , SCREAMING_SNAKE_CASE :Tuple=1_3_3_5_3_7_1_8 , SCREAMING_SNAKE_CASE :int=5_0_0_0 , SCREAMING_SNAKE_CASE :Dict=1 , SCREAMING_SNAKE_CASE :Union[str, Any]=0 , SCREAMING_SNAKE_CASE :str=2 , **SCREAMING_SNAKE_CASE :Union[str, Any] , ) -> Optional[int]:
        """Store the common, reader and retrieval hyperparameters and forward
        pad/bos/eos token ids to the base config."""
        super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )

        # Common config
        _a : Tuple =vocab_size
        _a : List[Any] =max_position_embeddings
        _a : List[Any] =hidden_size
        _a : Any =retriever_proj_size
        _a : List[Any] =num_hidden_layers
        _a : int =num_attention_heads
        _a : Optional[Any] =num_candidates
        _a : Optional[Any] =intermediate_size
        _a : Tuple =hidden_act
        _a : Optional[Any] =hidden_dropout_prob
        _a : Any =attention_probs_dropout_prob
        _a : Optional[Any] =initializer_range
        _a : List[str] =type_vocab_size
        _a : str =layer_norm_eps

        # Reader config
        _a : Tuple =span_hidden_size
        _a : List[Any] =max_span_width
        _a : Optional[Any] =reader_layer_norm_eps
        _a : int =reader_beam_size
        _a : Tuple =reader_seq_len

        # Retrieval config
        _a : Optional[Any] =num_block_records
        _a : Any =searcher_beam_size
| 276 |
'''simple docstring'''
class A__ :
    """Undirected weighted graph stored as a nested adjacency dict
    (``adjacency[head][tail] = weight``), with a helper that perturbs equal
    edge weights so all weights become distinct (useful before Kruskal/Boruvka).

    NOTE(review): every method below is named ``__UpperCAmelCase`` — only the
    last def survives as a class attribute — and two of them declare duplicate
    ``SCREAMING_SNAKE_CASE`` parameters (a SyntaxError). The internal calls
    (``self.add_vertex``, ``self.get_edges``, ``g.add_vertex``/``add_edge``)
    reveal the intended names (add_vertex, add_edge, distinct_weight,
    get_edges, get_vertices, build); bodies kept byte-for-byte.
    """

    def __init__( self :List[str] ) -> List[Any]:
        """Start with an empty graph (vertex/edge counters and adjacency map)."""
        _a : Tuple =0
        _a : Any =0
        _a : int ={}

    def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :List[str] ) -> Optional[int]:
        """Register a vertex if it is not present yet (intended: add_vertex)."""
        if vertex not in self.adjacency:
            _a : Dict ={}
            self.num_vertices += 1

    def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Any ) -> List[str]:
        """Add an undirected weighted edge; self-loops are ignored (intended:
        add_edge; duplicate parameter names — SyntaxError as written)."""
        self.add_vertex(SCREAMING_SNAKE_CASE )
        self.add_vertex(SCREAMING_SNAKE_CASE )

        if head == tail:
            return

        _a : Any =weight
        _a : Tuple =weight

    def __UpperCAmelCase ( self :Dict ) -> Optional[int]:
        """Sort edges by weight and bump equal neighbors so every edge weight
        is distinct (intended: distinct_weight)."""
        _a : Union[str, Any] =self.get_edges()
        for edge in edges:
            _a , _a , _a : List[str] =edge
            edges.remove((tail, head, weight) )

        for i in range(len(SCREAMING_SNAKE_CASE ) ):
            _a : str =list(edges[i] )

        edges.sort(key=lambda SCREAMING_SNAKE_CASE : e[2] )
        for i in range(len(SCREAMING_SNAKE_CASE ) - 1 ):
            if edges[i][2] >= edges[i + 1][2]:
                _a : Union[str, Any] =edges[i][2] + 1
        for edge in edges:
            _a , _a , _a : Tuple =edge
            _a : Tuple =weight
            _a : List[Any] =weight

    def __str__( self :int ) -> str:
        """Render one ``head -> tail == weight`` line per stored edge."""
        _a : int =""""""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                _a : str =self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("""\n""" )

    def __UpperCAmelCase ( self :Optional[int] ) -> Optional[Any]:
        """Return the edge list as (tail, head, weight) tuples (intended:
        get_edges)."""
        _a : Union[str, Any] =[]
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]) )
        return output

    def __UpperCAmelCase ( self :List[Any] ) -> List[Any]:
        """Return the vertex collection (intended: get_vertices)."""
        return self.adjacency.keys()

    @staticmethod
    def __UpperCAmelCase ( SCREAMING_SNAKE_CASE :Dict=None , SCREAMING_SNAKE_CASE :List[Any]=None ) -> Optional[int]:
        """Factory building a graph from vertex and edge lists (intended:
        build; duplicate parameter names — SyntaxError as written)."""
        _a : str =Graph()
        if vertices is None:
            _a : Union[str, Any] =[]
        if edges is None:
            _a : List[Any] =[]
        for vertex in vertices:
            g.add_vertex(SCREAMING_SNAKE_CASE )
        for edge in edges:
            g.add_edge(*SCREAMING_SNAKE_CASE )
        return g
class UnionFind:
    """Disjoint-set (union-find) with union by rank and path compression.

    Also hosts :meth:`boruvka_mst`, which uses it to build a minimum
    spanning tree of a ``Graph`` (defined earlier in this module) via
    Boruvka's algorithm. The obfuscated original collapsed every method
    name and both root variables into single placeholders; restored here.
    """

    def __init__(self):
        self.parent = {}
        self.rank = {}

    def __len__(self):
        return len(self.parent)

    def make_set(self, item):
        """Create (or fetch) the singleton set containing *item*; return its root."""
        if item in self.parent:
            return self.find(item)
        self.parent[item] = item
        self.rank[item] = 0
        return item

    def find(self, item):
        """Return the representative of *item*'s set, compressing paths on the way."""
        if item not in self.parent:
            return self.make_set(item)
        if item != self.parent[item]:
            self.parent[item] = self.find(self.parent[item])
        return self.parent[item]

    def union(self, item1, item2):
        """Merge the sets containing *item1* and *item2*; return the surviving root."""
        root1 = self.find(item1)
        root2 = self.find(item2)
        if root1 == root2:
            return root1
        if self.rank[root1] > self.rank[root2]:
            self.parent[root2] = root1
            return root1
        if self.rank[root1] < self.rank[root2]:
            self.parent[root1] = root2
            return root2
        # Equal ranks: root1 wins and its rank grows by one.
        self.rank[root1] += 1
        self.parent[root2] = root1
        return root1

    @staticmethod
    def boruvka_mst(graph):
        """Return the minimum spanning tree of *graph* (weights must be distinct)."""
        num_components = graph.num_vertices
        union_find = UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            # Drop the mirrored duplicate of every undirected edge.
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)  # Graph is defined earlier in this module
        return mst
| 276 | 1 |
"""simple docstring"""
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    """
    Convert molarity (moles / volume, in litres) to normality via the n-factor.

    >>> molarity_to_normality(2, 3.1, 0.31)
    20
    """
    return round(float(moles / volume) * nfactor)
def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """
    Ideal gas law, solved for pressure: P = nRT / V with R = 0.0821 L·atm/(mol·K).

    >>> moles_to_pressure(0.82, 3, 300)
    90
    """
    return round(float((moles * 0.0821 * temperature) / (volume)))
def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """
    Ideal gas law, solved for volume: V = nRT / P with R = 0.0821 L·atm/(mol·K).

    >>> moles_to_volume(0.82, 3, 300)
    90
    """
    return round(float((moles * 0.0821 * temperature) / (pressure)))
def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """
    Ideal gas law, solved for temperature: T = PV / (nR) with R = 0.0821 L·atm/(mol·K).

    >>> pressure_and_volume_to_temperature(0.82, 1, 2)
    20
    """
    return round(float((pressure * volume) / (0.0821 * moles)))
if __name__ == "__main__":
    # Run the usage examples embedded in the docstrings above.
    import doctest
    doctest.testmod()
| 355 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Test suite for ``GPTSanJapaneseTokenizer`` (slow tokenizer only).

    The obfuscated original had an undefined base class and collapsed every
    local into a single placeholder, leaving later references
    (``tokens``, ``prefix_text``, ``x_token`` ...) undefined; the upstream
    names are restored here.
    """

    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False  # no fast (Rust) implementation for this tokenizer
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file, "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        # 㔺 is a rare variant of 世; decoding normalises it.
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = "こんにちは、世界。 こんばんは、㔺界。"
        expected_token = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_token)

        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)

        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)

    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        tokens = tokenizer.encode(input_text)
        output_text = tokenizer.decode(tokens)
        self.assertEqual(output_text, expected_text)

    @slow
    def test_prefix_input_token_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        expected_text = "こんにちは、世界。こんばんは、世界。😀"
        tokens_1 = tokenizer.encode(prefix_text + input_text)
        tokens_2 = tokenizer.encode("", prefix_text=prefix_text + input_text)
        tokens_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        text_1 = tokenizer.decode(tokens_1)
        text_2 = tokenizer.decode(tokens_2)
        text_3 = tokenizer.decode(tokens_3)
        self.assertEqual(text_1, expected_text)
        self.assertEqual(text_2, expected_text)
        self.assertEqual(text_3, expected_text)

    @slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_ids_1 = tokenizer(prefix_text + input_text).token_type_ids
        type_ids_2 = tokenizer("", prefix_text=prefix_text + input_text).token_type_ids
        type_ids_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_ids_1, expected_mask_1)
        self.assertListEqual(type_ids_2, expected_mask_2)
        self.assertListEqual(type_ids_3, expected_mask_3)

    @slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        x_token_1 = tokenizer.encode("あンいワ")
        x_token_2 = tokenizer.encode("", prefix_text="あンいワ")
        x_token_3 = tokenizer.encode("いワ", prefix_text="あン")
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1, x_token_2)
        self.assertNotEqual(x_token_1, x_token_3)
        self.assertEqual(x_token_1[1], x_token_2[-1])  # SEG token
        self.assertEqual(x_token_1[1], x_token_3[3])  # SEG token

    @slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        input_pairs = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
        x_token = tokenizer(input_pairs, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(input_pairs, padding=True)

        # fmt: off
        expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_outputs)
        self.assertListEqual(x_token.token_type_ids, expected_typeids)
        self.assertListEqual(x_token.attention_mask, expected_attmask)
        self.assertListEqual(x_token_2.input_ids, expected_outputs)
        self.assertListEqual(x_token_2.token_type_ids, expected_typeids)
        self.assertListEqual(x_token_2.attention_mask, expected_attmask)

    def test_conversion_reversible(self):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
| 202 | 0 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

# File names expected inside a saved tokenizer directory.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Load a vocabulary file and an emoji file into dictionaries.

    Each vocab line may hold several comma-separated surface forms that map
    to the same id. Returns (vocab, raw_vocab, ids_to_tokens, emoji) where
    ``vocab`` maps each surface form to its id, ``raw_vocab`` maps the raw
    comma-joined line to its id, and ``ids_to_tokens`` maps an id back to
    its list of surface forms.
    """
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    # A line that is just "," (or contains no comma) is a single token;
    # otherwise it is split into its comma-separated variants.
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    """
    Tokenizer for GPT-NeoX-Japanese: a sub-word vocabulary (vocab.txt) plus
    an emoji table (emoji.json), with the actual splitting delegated to
    :class:`SubWordJapaneseTokenizer`. The obfuscated original had an
    undefined base class and clobbered attribute assignments; restored here.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
                ' model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`'
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
                ' pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`'
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )

    @property
    def vocab_size(self):
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id; unknown tokens map to the unk id."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an id (int) back to its token string."""
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        """Concatenate a sequence of tokens into a single string."""
        out_string = ''.join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation):
        """Flatten a Conversation into ids, keeping only the trailing model_max_length tokens."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write vocab and emoji files into *save_directory* and return their paths."""
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file']
            )
        else:
            vocab_file = (
                (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
            )
            emoji_file = (
                (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
            )
        with open(vocab_file, 'w', encoding='utf-8') as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    # Gaps in the id sequence indicate a corrupted vocabulary.
                    logger.warning(
                        f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        ' Please check that the vocabulary is not corrupted!'
                    )
                    index = token_index
                writer.write(','.join(token) + '\n')
                index += 1
        with open(emoji_file, 'w', encoding='utf-8') as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    """
    Sub-word tokenizer for Japanese with emoji support and optional content
    cleaning (URLs, e-mails, phone numbers, dates and prices become
    placeholder tokens). The obfuscated original collapsed every attribute
    and method name into one placeholder; restored here.
    """

    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        # Patterns used by clean_text to normalise content.
        self.content_repatter1 = re.compile(R'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)')
        self.content_repatter2 = re.compile(R'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*')
        self.content_repatter3 = re.compile(R'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}')
        self.content_repatter4 = re.compile(
            R'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*'
        )
        self.content_repatter5 = re.compile(
            R'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*'
        )
        self.content_repatter6 = re.compile(
            R'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*'
        )
        keisen = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
        blocks = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
        self.content_trans1 = str.maketrans({k: '<BLOCK>' for k in keisen + blocks})

    def __len__(self):
        return len(self.ids_to_tokens)

    def clean_text(self, content):
        """Replace URLs, e-mails, phone numbers, dates, prices and box-drawing runs."""
        content = self.content_repatter1.sub('<URL>', content)
        content = self.content_repatter2.sub('<EMAIL>', content)
        content = self.content_repatter3.sub('<TEL>', content)
        content = self.content_repatter4.sub('<DATE>', content)
        content = self.content_repatter5.sub('<DATE>', content)
        content = self.content_repatter6.sub('<PRICE>', content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace('<BLOCK><BLOCK>', '<BLOCK>')
        return content

    def tokenize(self, text, clean=False):
        """Greedy longest-match tokenization, preferring the smallest token id."""
        text = text.replace(' ', '<SP>')
        text = text.replace('　', '<SP>')  # full-width (ideographic) space
        text = text.replace('\r\n', '<BR>')
        text = text.replace('\n', '<BR>')
        text = text.replace('\r', '<BR>')
        text = text.replace('\t', '<TAB>')
        text = text.replace('—', 'ー')
        text = text.replace('−', 'ー')
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            # True for single characters whose UTF-8 encoding is 2 bytes
            # in a few symbol ranges.
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            # True for single characters in the U+2000..U+2BFF block
            # (3-byte UTF-8 encodings).
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            # Special tokens start with '<' and may be up to maxlen long;
            # plain text is matched with windows of at most 3 characters.
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == '<' else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append('<KIGOU>')
                elif checku2e(wd):
                    result.append('<U2000U2BFF>')
                else:
                    for i in wd.encode('utf-8'):
                        result.append('<|byte%d|>' % i)
                pos = end
        return result

    def convert_id_to_token(self, index, breakline="\n"):
        """Map an id back to text, decoding byte tokens and placeholder tokens."""
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode('utf-8', errors='replace'))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji['emoji_inv'][word])
            elif word == "<SP>":
                words.append(' ')
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append('\t')
            elif word == "<BLOCK>":
                words.append('▀')
            elif word == "<KIGOU>":
                words.append('ǀ')
            elif word == "<U2000U2BFF>":
                words.append('‖')
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode('utf-8', errors='replace'))
        text = ''.join(words)
        return text
| 280 |
def merge_sort(collection: list) -> list:
    """Sort *collection* by repeatedly extracting its minimum and maximum.

    Note: the input list is consumed (emptied down to at most one element)
    during sorting.

    >>> merge_sort([3, 1, 2])
    [1, 2, 3]
    """
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end
if __name__ == "__main__":
    # Read a comma-separated list of integers from stdin and print it sorted.
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
| 280 | 1 |
"""simple docstring"""
# Base of the rolling hash (size of the byte alphabet).
alphabet_size = 256
# Modulus to hash a string
modulus = 1000003


def rabin_karp(pattern: str, text: str) -> bool:
    """
    Return True if *pattern* occurs in *text*, using the Rabin-Karp
    rolling-hash algorithm: only windows whose hash matches the pattern's
    hash are compared character by character.
    """
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
def test_rabin_karp() -> None:
    """Exercise rabin_karp on matching and non-matching pattern/text pairs."""
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5) non-ASCII characters
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
| 1 |
"""simple docstring"""
def solution(length: int = 50) -> int:
    """
    Project Euler 114: count the ways a row of *length* units can be filled
    with blocks of length >= 3 separated by at least one empty unit.

    >>> solution(3)
    2
    >>> solution(7)
    17
    """
    # ways_number[n] = number of fillings for a row of n units.
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                # A block at block_start leaves a shorter free row behind it.
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            # The block may also sit flush at the end of the row.
            ways_number[row_length] += 1

    return ways_number[length]
if __name__ == "__main__":
    # Print the answer for the default row length of 50.
    print(F"""{solution() = }""")
| 1 | 1 |
from __future__ import annotations
# Sample input and the expected "next greatest element" output for it.
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """For each element, return the next strictly greater element to its
    right, or -1 if none exists. O(n^2) index-based reference version.
    """
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_item: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_item = arr[j]
                break
        result.append(next_item)
    return result
def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Like ``next_greatest_element_slow`` but iterates slices directly,
    which avoids repeated indexing. Still O(n^2) in the worst case.
    """
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result
def next_greatest_element(arr: list[float]) -> list[float]:
    """O(n) monotonic-stack version: scan right-to-left keeping a stack of
    candidates; the first stack element greater than arr[index] is its
    next greatest element.
    """
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            # Pop everything not greater than the current element.
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    # Benchmark the three implementations against each other.
    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        " next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    """A SafeLoader that raises instead of silently keeping the last value
    when a YAML mapping contains duplicate keys.
    """

    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        # Lists are unhashable; convert them to tuples before counting.
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def _snake_case ( lowerCAmelCase : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
SCREAMING_SNAKE_CASE_ : List[Any] = full_content[1:].index("---" ) + 1
SCREAMING_SNAKE_CASE_ : int = "\n".join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(lowerCAmelCase )
class DatasetMetadata(dict):
    """Dataset card metadata: a dict that round-trips through the YAML
    front-matter block of a README.md.
    """

    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path):
        """Load metadata from the YAML front matter of the README at *path*."""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path):
        """Write this metadata into the README at *path*, preserving its body."""
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content=None):
        """Return README text with this metadata as its YAML front matter."""
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string):
        """Parse a YAML string (rejecting duplicate keys) into metadata."""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self):
        """Dump the metadata as YAML, mapping dashed fields back to dashes."""
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
# Known task categories for dataset cards (values list their sub-task ids).
known_task_ids = {
    "image-classification": [],
    "translation": [],
    "image-segmentation": [],
    "fill-mask": [],
    "automatic-speech-recognition": [],
    "token-classification": [],
    "sentence-similarity": [],
    "audio-classification": [],
    "question-answering": [],
    "summarization": [],
    "zero-shot-classification": [],
    "table-to-text": [],
    "feature-extraction": [],
    "other": [],
    "multiple-choice": [],
    "text-classification": [],
    "text-to-image": [],
    "text2text-generation": [],
    "zero-shot-image-classification": [],
    "tabular-classification": [],
    "tabular-regression": [],
    "image-to-image": [],
    "tabular-to-text": [],
    "unconditional-image-generation": [],
    "text-retrieval": [],
    "text-to-speech": [],
    "object-detection": [],
    "audio-to-audio": [],
    "text-generation": [],
    "conversational": [],
    "table-question-answering": [],
    "visual-question-answering": [],
    "image-to-text": [],
    "reinforcement-learning": [],
    "voice-activity-detection": [],
    "time-series-forecasting": [],
    "document-question-answering": [],
}
if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
| 18 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
# Module logger.
# NOTE(review): every constant below is bound to the same obfuscated name
# `lowercase`, so only the last assignment survives at runtime; upstream these
# are distinct constants (logger, VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, PRETRAINED_INIT_CONFIGURATION).
lowercase : Any = logging.get_logger(__name__)
# File names expected inside a saved tokenizer directory.
lowercase : Dict = {'''vocab_file''': '''vocab.txt'''}
# Hub URLs of the vocab files for the published ConvBERT checkpoints.
lowercase : Dict = {
    '''vocab_file''': {
        '''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
        '''YituTech/conv-bert-medium-small''': (
            '''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
        ),
        '''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
    }
}
# Maximum input lengths (in tokens) per checkpoint.
lowercase : Optional[int] = {
    '''YituTech/conv-bert-base''': 5_12,
    '''YituTech/conv-bert-medium-small''': 5_12,
    '''YituTech/conv-bert-small''': 5_12,
}
# Per-checkpoint tokenizer init overrides.
lowercase : Dict = {
    '''YituTech/conv-bert-base''': {'''do_lower_case''': True},
    '''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
    '''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}
class UpperCAmelCase_ ( A__ ):
    """Fast (Rust-backed) ConvBERT tokenizer based on WordPiece.

    Fixes over the obfuscated original: the duplicated ``_SCREAMING_SNAKE_CASE``
    parameter names were a SyntaxError, several locals were referenced under
    names that were never bound, five class attributes and three methods had
    been collapsed onto single names (shadowing each other), and the restored
    method names are the ones the ``PreTrainedTokenizerFast`` base class
    dispatches to.
    """

    # Class-level configuration read by the PreTrainedTokenizerFast machinery.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ) -> None:
        """Build the fast tokenizer and align the backend normalizer options."""
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # The serialized normalizer state may disagree with the options the
        # caller asked for (e.g. a lowercasing checkpoint loaded with
        # do_lower_case=False) — rebuild it in that case.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type") )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Return ``[CLS] A [SEP]`` (plus ``B [SEP]`` when a pair is given)."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Return token-type ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Save the WordPiece vocabulary files; returns the written paths as a tuple."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 356 |
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
# Trainer internals and the local regression fixtures require torch; import
# them only when it is installed (the tests are @require_torch anyway).
if is_torch_available():
    from transformers.trainer import DEFAULT_CALLBACKS
    from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ ):
    """Trainer callback that records, in order, the name of every hook fired.

    Fixes over the obfuscated original: ``__init__`` bound the event list to a
    throwaway local instead of ``self.events`` (so every append raised
    AttributeError), and all hooks had been collapsed onto the single name
    ``_lowerCAmelCase`` — the Trainer dispatches to these hooks *by name*, so
    the TrainerCallback protocol names are restored.
    """

    def __init__(self) -> None:
        # Chronological record of hook names; compared against the expected
        # event sequence by the tests below.
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
    """Integration tests for the Trainer callback mechanism.

    NOTE(review): obfuscation collapsed setUp/tearDown and the helper methods
    onto one name (`_lowerCAmelCase`, so later defs shadow earlier ones), gave
    several defs duplicated `_SCREAMING_SNAKE_CASE` parameters (a SyntaxError),
    and rebinds every local to `snake_case_` while later lines still read the
    original names (`trainer`, `step`, `expected_events`, `events`, ...).
    The code is kept byte-identical; comments describe the original intent.
    """
    def _lowerCAmelCase ( self ) -> Any:
        # setUp: fresh temporary output dir for each test.
        snake_case_ : Optional[int] = tempfile.mkdtemp()
    def _lowerCAmelCase ( self ) -> Optional[Any]:
        # tearDown: remove the temporary output dir.
        shutil.rmtree(self.output_dir )
    def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=64 , _SCREAMING_SNAKE_CASE=64 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False , **_SCREAMING_SNAKE_CASE ) -> Dict:
        # get_trainer: build a Trainer over tiny regression fixtures.
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # its set to False since the tests later on depend on its value.
        snake_case_ : Any = RegressionDataset(length=_SCREAMING_SNAKE_CASE )
        snake_case_ : Dict = RegressionDataset(length=_SCREAMING_SNAKE_CASE )
        snake_case_ : List[Any] = RegressionModelConfig(a=_SCREAMING_SNAKE_CASE , b=_SCREAMING_SNAKE_CASE )
        snake_case_ : List[str] = RegressionPreTrainedModel(_SCREAMING_SNAKE_CASE )
        snake_case_ : Tuple = TrainingArguments(self.output_dir , disable_tqdm=_SCREAMING_SNAKE_CASE , report_to=[] , **_SCREAMING_SNAKE_CASE )
        return Trainer(
            _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , train_dataset=_SCREAMING_SNAKE_CASE , eval_dataset=_SCREAMING_SNAKE_CASE , callbacks=_SCREAMING_SNAKE_CASE , )
    def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
        # check_callbacks_equality: compare two callback lists irrespective of
        # order, matching classes against instances of those classes.
        self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) )
        # Order doesn't matter
        snake_case_ : List[str] = sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : cb.__name__ if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else cb.__class__.__name__ )
        snake_case_ : List[str] = sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : cb.__name__ if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else cb.__class__.__name__ )
        for cba, cba in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
            if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
                self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
                self.assertEqual(_SCREAMING_SNAKE_CASE , cba.__class__ )
            elif not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
                self.assertEqual(cba.__class__ , _SCREAMING_SNAKE_CASE )
            else:
                self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> int:
        # get_expected_events: reconstruct the full event sequence a training
        # run with the given trainer configuration should emit.
        snake_case_ : int = ["on_init_end", "on_train_begin"]
        snake_case_ : Any = 0
        snake_case_ : Dict = len(trainer.get_eval_dataloader() )
        snake_case_ : Tuple = ["on_prediction_step"] * len(trainer.get_eval_dataloader() ) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs ):
            expected_events.append("on_epoch_begin" )
            for _ in range(_SCREAMING_SNAKE_CASE ):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log" )
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save" )
            expected_events.append("on_epoch_end" )
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
    def _lowerCAmelCase ( self ) -> int:
        # test_init_callback: default callbacks, callbacks added at init, and
        # the disable_tqdm switch between ProgressCallback and PrinterCallback.
        snake_case_ : Dict = self.get_trainer()
        snake_case_ : Any = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks , _SCREAMING_SNAKE_CASE )
        # Callbacks passed at init are added to the default callbacks
        snake_case_ : int = self.get_trainer(callbacks=[MyTestTrainerCallback] )
        expected_callbacks.append(_SCREAMING_SNAKE_CASE )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , _SCREAMING_SNAKE_CASE )
        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        snake_case_ : Optional[int] = self.get_trainer(disable_tqdm=_SCREAMING_SNAKE_CASE )
        snake_case_ : Dict = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks , _SCREAMING_SNAKE_CASE )
    def _lowerCAmelCase ( self ) -> Optional[int]:
        # test_add_remove_callback: add/pop/remove callbacks by class and by
        # instance, checking the handler list after each mutation.
        snake_case_ : Tuple = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        snake_case_ : List[str] = self.get_trainer()
        # We can add, pop, or remove by class name
        trainer.remove_callback(_SCREAMING_SNAKE_CASE )
        expected_callbacks.remove(_SCREAMING_SNAKE_CASE )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , _SCREAMING_SNAKE_CASE )
        snake_case_ : Union[str, Any] = self.get_trainer()
        snake_case_ : List[Any] = trainer.pop_callback(_SCREAMING_SNAKE_CASE )
        self.assertEqual(cb.__class__ , _SCREAMING_SNAKE_CASE )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , _SCREAMING_SNAKE_CASE )
        trainer.add_callback(_SCREAMING_SNAKE_CASE )
        expected_callbacks.insert(0 , _SCREAMING_SNAKE_CASE )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , _SCREAMING_SNAKE_CASE )
        # We can also add, pop, or remove by instance
        snake_case_ : str = self.get_trainer()
        snake_case_ : Tuple = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(_SCREAMING_SNAKE_CASE )
        expected_callbacks.remove(_SCREAMING_SNAKE_CASE )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , _SCREAMING_SNAKE_CASE )
        snake_case_ : str = self.get_trainer()
        snake_case_ : List[Any] = trainer.callback_handler.callbacks[0]
        snake_case_ : List[str] = trainer.pop_callback(_SCREAMING_SNAKE_CASE )
        self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , _SCREAMING_SNAKE_CASE )
        trainer.add_callback(_SCREAMING_SNAKE_CASE )
        expected_callbacks.insert(0 , _SCREAMING_SNAKE_CASE )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , _SCREAMING_SNAKE_CASE )
    def _lowerCAmelCase ( self ) -> Union[str, Any]:
        # test_event_flow: run short trainings under several logging/saving/
        # evaluation configurations and compare the recorded events against
        # get_expected_events; finally check the duplicate-callback warning.
        import warnings
        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore" , category=_SCREAMING_SNAKE_CASE )
        snake_case_ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
        trainer.train()
        snake_case_ : Optional[Any] = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(_SCREAMING_SNAKE_CASE , self.get_expected_events(_SCREAMING_SNAKE_CASE ) )
        # Independent log/save/eval
        snake_case_ : int = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
        trainer.train()
        snake_case_ : Tuple = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(_SCREAMING_SNAKE_CASE , self.get_expected_events(_SCREAMING_SNAKE_CASE ) )
        snake_case_ : Any = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
        trainer.train()
        snake_case_ : Tuple = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(_SCREAMING_SNAKE_CASE , self.get_expected_events(_SCREAMING_SNAKE_CASE ) )
        snake_case_ : Dict = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="steps" )
        trainer.train()
        snake_case_ : List[Any] = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(_SCREAMING_SNAKE_CASE , self.get_expected_events(_SCREAMING_SNAKE_CASE ) )
        snake_case_ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="epoch" )
        trainer.train()
        snake_case_ : Union[str, Any] = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(_SCREAMING_SNAKE_CASE , self.get_expected_events(_SCREAMING_SNAKE_CASE ) )
        # A bit of everything
        snake_case_ : Any = self.get_trainer(
            callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy="steps" , )
        trainer.train()
        snake_case_ : List[str] = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(_SCREAMING_SNAKE_CASE , self.get_expected_events(_SCREAMING_SNAKE_CASE ) )
        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning" ) as warn_mock:
            snake_case_ : int = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
            assert str(_SCREAMING_SNAKE_CASE ) in warn_mock.call_args[0][0]
| 36 | 0 |
"""Launch a transformers example script on a Runhouse cluster (BYO or on-demand)."""
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
    # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
    # setup instructions, if using on-demand hardware
    # If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
    # If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
    # Throw an error if user passes both BYO and on-demand cluster args
    # Otherwise, use default values
    # NOTE(review): the parser/args/cluster/example_dir values are all bound
    # to the obfuscated name `__lowercase`, while later lines read `parser`,
    # `args`, `cluster` and `example_dir` — this script raises NameError as
    # written; intent is documented only.
    __lowercase = argparse.ArgumentParser()
    parser.add_argument("""--user""", type=str, default="""ubuntu""")
    parser.add_argument("""--host""", type=str, default="""localhost""")
    parser.add_argument("""--key_path""", type=str, default=None)
    parser.add_argument("""--instance""", type=str, default="""V100:1""")
    parser.add_argument("""--provider""", type=str, default="""cheapest""")
    parser.add_argument("""--use_spot""", type=bool, default=False)
    parser.add_argument("""--example""", type=str, default="""pytorch/text-generation/run_generation.py""")
    __lowercase , __lowercase = parser.parse_known_args()
    if args.host != "localhost":
        if args.instance != "V100:1" or args.provider != "cheapest":
            raise ValueError("""Cannot specify both BYO and on-demand cluster args""")
        __lowercase = rh.cluster(
            name="""rh-cluster""", ips=[args.host], ssh_creds={"""ssh_user""": args.user, """ssh_private_key""": args.key_path}
        )
    else:
        __lowercase = rh.cluster(
            name="""rh-cluster""", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    __lowercase = args.example.rsplit("""/""", 1)[0]
    # Set up remote environment
    cluster.install_packages(["""pip:./"""])  # Installs transformers from local source
    # Note transformers is copied into the home directory on the remote machine, so we can install from there
    cluster.run([f'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
    cluster.run(["""pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"""])
    # Run example. You can bypass the CLI wrapper and paste your own code here.
    cluster.run([f'''python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}'''])
    # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
    # from my_script... import train
    # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
    # launch_train_gpu = rh.function(fn=train,
    #                                system=gpu,
    #                                reqs=reqs,
    #                                name='train_bert_glue')
    #
    # We can pass in arguments just like we would to a function:
    # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
    #                  stream_logs=True)
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
# Module logger (bound to the obfuscated name `__A` here).
__A =logging.get_logger(__name__)
# PIL is only needed for actual image objects; guard the import.
if is_vision_available():
    import PIL
class UpperCAmelCase__ ( __UpperCamelCase ):
    """CLIP-style image processor: resize -> center-crop -> rescale -> normalize.

    NOTE(review): obfuscation renamed resize/center_crop/rescale/normalize/
    preprocess to the same name `snake_case__` (later defs shadow earlier
    ones) and binds every __init__/preprocess local to the mangled name
    `__UpperCAmelCase` — the configuration is never stored on `self`, so this
    class cannot work as written; docstrings below describe the intent.
    """
    # Keys produced by preprocess().
    UpperCamelCase = ["""pixel_values"""]
    def __init__( self : Tuple , a_ : bool = True , a_ : Dict[str, int] = None , a_ : PILImageResampling = PILImageResampling.BICUBIC , a_ : bool = True , a_ : Dict[str, int] = None , a_ : bool = True , a_ : Union[int, float] = 1 / 2_55 , a_ : bool = True , a_ : Optional[Union[float, List[float]]] = None , a_ : Optional[Union[float, List[float]]] = None , a_ : bool = True , **a_ : str , ):
        """Store the default resize/crop/rescale/normalize configuration.

        Defaults: shortest_edge=224 resize, 224x224 center crop, 1/255
        rescale, OPENAI_CLIP mean/std normalization, RGB conversion on.
        """
        super().__init__(**a_ )
        __UpperCAmelCase : List[Any] = size if size is not None else {'''shortest_edge''': 2_24}
        __UpperCAmelCase : List[str] = get_size_dict(a_ , default_to_square=a_ )
        __UpperCAmelCase : int = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
        __UpperCAmelCase : Optional[int] = get_size_dict(a_ , default_to_square=a_ , param_name='''crop_size''' )
        __UpperCAmelCase : int = do_resize
        __UpperCAmelCase : Union[str, Any] = size
        __UpperCAmelCase : Union[str, Any] = resample
        __UpperCAmelCase : Any = do_center_crop
        __UpperCAmelCase : Any = crop_size
        __UpperCAmelCase : Any = do_rescale
        __UpperCAmelCase : Dict = rescale_factor
        __UpperCAmelCase : Union[str, Any] = do_normalize
        __UpperCAmelCase : Any = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        __UpperCAmelCase : int = image_std if image_std is not None else OPENAI_CLIP_STD
        __UpperCAmelCase : List[str] = do_convert_rgb
    def snake_case__ ( self : Optional[Any] , a_ : np.ndarray , a_ : Dict[str, int] , a_ : PILImageResampling = PILImageResampling.BICUBIC , a_ : Optional[Union[str, ChannelDimension]] = None , **a_ : Optional[int] , ):
        """Resize so the shortest edge matches size['shortest_edge'], keeping aspect ratio."""
        __UpperCAmelCase : Dict = get_size_dict(a_ , default_to_square=a_ )
        if "shortest_edge" not in size:
            raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
        __UpperCAmelCase : Optional[int] = get_resize_output_image_size(a_ , size=size['''shortest_edge'''] , default_to_square=a_ )
        return resize(a_ , size=a_ , resample=a_ , data_format=a_ , **a_ )
    def snake_case__ ( self : Union[str, Any] , a_ : np.ndarray , a_ : Dict[str, int] , a_ : Optional[Union[str, ChannelDimension]] = None , **a_ : Dict , ):
        """Center-crop the image to size['height'] x size['width']."""
        __UpperCAmelCase : Optional[Any] = get_size_dict(a_ )
        if "height" not in size or "width" not in size:
            raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
        return center_crop(a_ , size=(size['''height'''], size['''width''']) , data_format=a_ , **a_ )
    def snake_case__ ( self : Union[str, Any] , a_ : np.ndarray , a_ : Union[int, float] , a_ : Optional[Union[str, ChannelDimension]] = None , **a_ : List[str] , ):
        """Multiply pixel values by the given scale (typically 1/255)."""
        return rescale(a_ , scale=a_ , data_format=a_ , **a_ )
    def snake_case__ ( self : Optional[Any] , a_ : np.ndarray , a_ : Union[float, List[float]] , a_ : Union[float, List[float]] , a_ : Optional[Union[str, ChannelDimension]] = None , **a_ : Dict , ):
        """Normalize the image with the given per-channel mean and std."""
        return normalize(a_ , mean=a_ , std=a_ , data_format=a_ , **a_ )
    def snake_case__ ( self : Any , a_ : ImageInput , a_ : bool = None , a_ : Dict[str, int] = None , a_ : PILImageResampling = None , a_ : bool = None , a_ : int = None , a_ : bool = None , a_ : float = None , a_ : bool = None , a_ : Optional[Union[float, List[float]]] = None , a_ : Optional[Union[float, List[float]]] = None , a_ : bool = None , a_ : Optional[Union[str, TensorType]] = None , a_ : Optional[ChannelDimension] = ChannelDimension.FIRST , **a_ : Dict , ):
        """Run the full pipeline over one image or a batch, returning a BatchFeature.

        Per-call arguments override the instance defaults; each transform is
        applied only when its corresponding do_* flag is true.
        """
        __UpperCAmelCase : Tuple = do_resize if do_resize is not None else self.do_resize
        __UpperCAmelCase : Optional[Any] = size if size is not None else self.size
        __UpperCAmelCase : Dict = get_size_dict(a_ , param_name='''size''' , default_to_square=a_ )
        __UpperCAmelCase : int = resample if resample is not None else self.resample
        __UpperCAmelCase : Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
        __UpperCAmelCase : Any = crop_size if crop_size is not None else self.crop_size
        __UpperCAmelCase : Dict = get_size_dict(a_ , param_name='''crop_size''' , default_to_square=a_ )
        __UpperCAmelCase : int = do_rescale if do_rescale is not None else self.do_rescale
        __UpperCAmelCase : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor
        __UpperCAmelCase : List[str] = do_normalize if do_normalize is not None else self.do_normalize
        __UpperCAmelCase : Optional[int] = image_mean if image_mean is not None else self.image_mean
        __UpperCAmelCase : Tuple = image_std if image_std is not None else self.image_std
        __UpperCAmelCase : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        __UpperCAmelCase : List[str] = make_list_of_images(a_ )
        if not valid_images(a_ ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            __UpperCAmelCase : Optional[Any] = [convert_to_rgb(a_ ) for image in images]
        # All transformations expect numpy arrays.
        __UpperCAmelCase : int = [to_numpy_array(a_ ) for image in images]
        if do_resize:
            __UpperCAmelCase : int = [self.resize(image=a_ , size=a_ , resample=a_ ) for image in images]
        if do_center_crop:
            __UpperCAmelCase : List[str] = [self.center_crop(image=a_ , size=a_ ) for image in images]
        if do_rescale:
            __UpperCAmelCase : Dict = [self.rescale(image=a_ , scale=a_ ) for image in images]
        if do_normalize:
            __UpperCAmelCase : Optional[int] = [self.normalize(image=a_ , mean=a_ , std=a_ ) for image in images]
        __UpperCAmelCase : Optional[int] = [to_channel_dimension_format(a_ , a_ ) for image in images]
        __UpperCAmelCase : Union[str, Any] = {'''pixel_values''': images}
        return BatchFeature(data=a_ , tensor_type=a_ )
| 226 | 0 |
def __A ( _lowercase = 1_00 ):
    """Return the difference between the square of the sum and the sum of the
    squares of the first ``_lowercase`` natural numbers (Project Euler #6).

    Fixes over the original: the body read an undefined name ``n`` instead of
    the parameter, and used float ``/`` division; both closed forms are exact
    integers, so floor division keeps the result exact for large inputs.
    """
    # sum of squares: n(n+1)(2n+1)/6 ; square of sum: (n(n+1)/2)^2 — both
    # divisions are exact, so `//` introduces no rounding.
    sum_of_squares = _lowercase * (_lowercase + 1) * (2 * _lowercase + 1) // 6
    square_of_sum = (_lowercase * (_lowercase + 1) // 2) ** 2
    return square_of_sum - sum_of_squares
# Print the answer for the default n=100 when run as a script.
if __name__ == "__main__":
    # NOTE(review): `solution` is undefined here — obfuscation renamed the
    # function above to `__A`, so running this as a script raises NameError.
    print(f'{solution() = }')
| 75 |
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
# Make torch ops deterministic so the pixel-slice assertions below are stable.
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Fast (CPU-sized) smoke test for ScoreSdeVePipeline.

    NOTE(review): obfuscation rebinds every local to `_A` while later lines
    read the original names (`model`, `sde_ve`, `image`, ...), and
    `disable=__A` / `generator=__A` reference a name-mangled attribute that
    does not exist — this test cannot run as written.
    """
    @property
    def __A ( self: Dict ) -> Union[str, Any]:
        # dummy_uncond_unet: tiny seeded UNet so the smoke test stays fast.
        torch.manual_seed(0 )
        _A = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
        return model
    def __A ( self: Any ) -> Union[str, Any]:
        # End-to-end: 2 inference steps, compare a 3x3 corner slice against a
        # reference, and check return_dict=False yields the same image.
        _A = self.dummy_uncond_unet
        _A = ScoreSdeVeScheduler()
        _A = ScoreSdeVePipeline(unet=__A , scheduler=__A )
        sde_ve.to(__A )
        sde_ve.set_progress_bar_config(disable=__A )
        _A = torch.manual_seed(0 )
        _A = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=__A ).images
        _A = torch.manual_seed(0 )
        _A = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=__A , return_dict=__A )[
            0
        ]
        _A = image[0, -3:, -3:, -1]
        _A = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        _A = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Slow integration test: full ScoreSdeVe pipeline from a Hub checkpoint.

    NOTE(review): same obfuscation breakage as the fast test above — all
    locals are bound to `_A` but read under their original names.
    """
    def __A ( self: Dict ) -> Any:
        # Load google/ncsnpp-church-256, run 10 steps, and compare a 3x3
        # corner slice against the recorded reference values.
        _A = '''google/ncsnpp-church-256'''
        _A = UNetaDModel.from_pretrained(__A )
        _A = ScoreSdeVeScheduler.from_pretrained(__A )
        _A = ScoreSdeVePipeline(unet=__A , scheduler=__A )
        sde_ve.to(__A )
        sde_ve.set_progress_bar_config(disable=__A )
        _A = torch.manual_seed(0 )
        _A = sde_ve(num_inference_steps=10 , output_type='''numpy''' , generator=__A ).images
        _A = image[0, -3:, -3:, -1]
        assert image.shape == (1, 2_56, 2_56, 3)
        _A = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 75 | 1 |
"""simple docstring"""
class A_ :
    """Fenwick (binary indexed) tree specialised for range-maximum queries.

    ``update(index, value)`` sets ``arr[index]``; ``query(left, right)``
    returns ``max(arr[left:right])`` with ``right`` exclusive (0 for an
    empty range, and results are clipped at 0).

    Fixes over the obfuscated original: duplicated parameter names were a
    SyntaxError, all four methods shared one name while the bodies called
    nonexistent ``self.get_prev``/``get_next``, and the update's cached-range
    branch passed the same argument three times to ``max``. The cached range
    is now recomputed so *decreasing* updates also stay correct.
    """

    def __init__(self, size: int) -> None:
        # `arr` holds the raw values; `tree[i]` caches the maximum of
        # arr[get_prev(i) + 1 .. i].
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        # Next node whose cached range also covers `index`.
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        # One position before the left border of the range cached at `index`.
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        """Set arr[index] = value and refresh every cached range containing it."""
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                # This node covers exactly one element.
                self.tree[index] = value
            else:
                # Recompute the whole covered range so a value that *shrinks*
                # does not leave a stale maximum in the cache.
                self.tree[index] = max(self.arr[current_left_border : index + 1])
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        """Return max(arr[left:right]); `right` is exclusive."""
        right -= 1  # Because `right` is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                # The whole cached range [current_left + 1, right] is inside
                # the query — take its precomputed maximum and jump over it.
                result = max(result, self.tree[right])
                right = current_left
            else:
                # Partial overlap: fall back to the raw value, step by one.
                result = max(result, self.arr[right])
                right -= 1
        return result
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 78 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
# Lazy-import structure for the LongT5 model family.
# NOTE(review): obfuscation collapsed the distinct module-level names onto
# `snake_case_` (each assignment below overwrites the previous one, losing
# the import-structure dict) while the `_LazyModule(...)` call at the bottom
# still references the original `_import_structure` name — this module cannot
# import as written; comments describe the intended lazy-import layout.
snake_case_ = {
    """configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""],
}
# PyTorch model symbols, registered only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    snake_case_ = [
        """LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """LongT5EncoderModel""",
        """LongT5ForConditionalGeneration""",
        """LongT5Model""",
        """LongT5PreTrainedModel""",
    ]
# Flax model symbols, registered only when flax is installed.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    snake_case_ = [
        """FlaxLongT5ForConditionalGeneration""",
        """FlaxLongT5Model""",
        """FlaxLongT5PreTrainedModel""",
    ]
# Under static type checking, import everything eagerly so analyzers see the
# real symbols; at runtime, replace the module with a _LazyModule proxy.
if TYPE_CHECKING:
    from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longta import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongTaEncoderModel,
            LongTaForConditionalGeneration,
            LongTaModel,
            LongTaPreTrainedModel,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longta import (
            FlaxLongTaForConditionalGeneration,
            FlaxLongTaModel,
            FlaxLongTaPreTrainedModel,
        )
else:
    import sys
    # NOTE(review): `_import_structure` is undefined here (see note above).
    snake_case_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 78 | 1 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Module logger.
# NOTE(review): the constants below are all bound to the same obfuscated name
# `lowerCamelCase_`, so only the last assignment survives at runtime; upstream
# these are distinct constants (logger, VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES).
lowerCamelCase_ : str = logging.get_logger(__name__)
# Byte-level BPE needs both a vocab and a merges file.
lowerCamelCase_ : List[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all BART models at https://huggingface.co/models?filter=bart
lowerCamelCase_ : List[Any] = {
    """vocab_file""": {
        """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
        """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
        """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
        """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
        """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
        """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
    },
    """merges_file""": {
        """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
        """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
        """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
        """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
        """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
        """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
    },
}
# Maximum input lengths (in tokens) per checkpoint.
lowerCamelCase_ : Optional[int] = {
    """facebook/bart-base""": 10_24,
    """facebook/bart-large""": 10_24,
    """facebook/bart-large-mnli""": 10_24,
    """facebook/bart-large-cnn""": 10_24,
    """facebook/bart-large-xsum""": 10_24,
    """yjernite/bart_eli5""": 10_24,
}
@lru_cache()
def A__ ( ) -> Dict:
    """Return a mapping from every byte value (0-255) to a unicode character.

    Visible/"nice" bytes map to themselves; the remaining (whitespace/control)
    bytes are shifted into the 256+ codepoint range so the BPE vocabulary
    never contains characters the tokenizer would mangle. Cached because the
    table is fixed.

    Fixes over the obfuscated original: the working lists were bound to a
    placeholder name while the loop read the undefined names ``bs``/``cs``/
    ``n``/``lowerCamelCase``.
    """
    # Byte ranges that are kept as-is: printable ASCII and most of Latin-1.
    bs = (
        list(range(ord("""!"""), ord("""~""") + 1))
        + list(range(ord("""¡"""), ord("""¬""") + 1))
        + list(range(ord("""®"""), ord("""ÿ""") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            # Remap each remaining byte to the next free codepoint above 255.
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    chars = [chr(c) for c in cs]
    return dict(zip(bs, chars))
def A__ ( lowerCamelCase ) -> set:
    """Return the set of adjacent symbol pairs in a word.

    Args:
        lowerCamelCase: a sequence of symbols (e.g. a tuple of variable-length
            strings, as used by the BPE merge loop).

    Returns:
        A set of ``(prev_symbol, next_symbol)`` tuples; empty for words of
        length 0 or 1.

    NOTE(review): the original bound results to ``UpperCamelCase_`` while reading
    the undefined names ``pairs``/``prev_char``/``word``, and was annotated
    ``-> int`` although it builds a set — both fixed. An empty-input guard is
    added so an empty word yields an empty set instead of an IndexError.
    """
    pairs = set()
    if not lowerCamelCase:
        return pairs
    prev_char = lowerCamelCase[0]
    for char in lowerCamelCase[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class _UpperCamelCase ( _A ):
    """Byte-level BPE tokenizer for BART checkpoints (GPT-2 style ``vocab.json`` +
    ``merges.txt``).

    NOTE(review): this block appears machine-mangled and is not runnable as-is:
    every method is named ``lowerCAmelCase__`` so each definition shadows the
    previous one; ``__init__`` repeats the parameter name ``snake_case_`` (a
    SyntaxError); and locals are bound to ``UpperCamelCase_`` but read back under
    other names (``pairs``, ``bigram``, ``word``, ``index``, ...). The comments
    below document the evident *intent* of each method.
    """

    __UpperCamelCase : str = VOCAB_FILES_NAMES
    __UpperCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
    __UpperCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # Tensor names handed to the model's forward pass.
    __UpperCamelCase : Any = ["""input_ids""", """attention_mask"""]

    # Intended signature: (vocab_file, merges_file, errors="replace", bos/eos/sep/
    # cls/unk/pad/mask tokens, add_prefix_space=False, **kwargs).
    def __init__( self : Tuple , snake_case_ : Tuple , snake_case_ : List[str] , snake_case_ : str="replace" , snake_case_ : Any="<s>" , snake_case_ : Optional[int]="</s>" , snake_case_ : Union[str, Any]="</s>" , snake_case_ : List[Any]="<s>" , snake_case_ : List[Any]="<unk>" , snake_case_ : Any="<pad>" , snake_case_ : Union[str, Any]="<mask>" , snake_case_ : Union[str, Any]=False , **snake_case_ : Optional[Any] , ):
        # Wrap each plain-string special token in AddedToken (controls whitespace stripping).
        UpperCamelCase_: Optional[int] = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else bos_token
        UpperCamelCase_: List[Any] = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else eos_token
        UpperCamelCase_: Tuple = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else sep_token
        UpperCamelCase_: Optional[Any] = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else cls_token
        UpperCamelCase_: Any = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else unk_token
        UpperCamelCase_: Tuple = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        UpperCamelCase_: Union[str, Any] = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else mask_token
        super().__init__(
            errors=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , cls_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , add_prefix_space=snake_case_ , **snake_case_ , )
        # Load the token->id vocabulary (intended attribute: self.encoder).
        with open(snake_case_ , encoding="""utf-8""" ) as vocab_handle:
            UpperCamelCase_: str = json.load(snake_case_ )
        UpperCamelCase_: Tuple = {v: k for k, v in self.encoder.items()}
        UpperCamelCase_: List[Any] = errors  # how to handle errors in decoding
        UpperCamelCase_: List[str] = bytes_to_unicode()
        UpperCamelCase_: int = {v: k for k, v in self.byte_encoder.items()}
        # Load BPE merge rules; first line is a version header, hence [1:-1].
        with open(snake_case_ , encoding="""utf-8""" ) as merges_handle:
            UpperCamelCase_: Optional[int] = merges_handle.read().split("""\n""" )[1:-1]
        UpperCamelCase_: Any = [tuple(merge.split() ) for merge in bpe_merges]
        UpperCamelCase_: List[Any] = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
        UpperCamelCase_: List[str] = {}
        UpperCamelCase_: int = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        UpperCamelCase_: Any = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )

    # Intended: vocab_size property.
    @property
    def lowerCAmelCase__ ( self : Optional[Any] ):
        return len(self.encoder )

    # Intended: get_vocab() — base vocab merged with added tokens.
    def lowerCAmelCase__ ( self : int ):
        return dict(self.encoder , **self.added_tokens_encoder )

    # Intended: bpe(token) — iteratively merge the lowest-ranked adjacent pair
    # until no known merge remains; result is cached per token.
    def lowerCAmelCase__ ( self : List[str] , snake_case_ : List[str] ):
        if token in self.cache:
            return self.cache[token]
        UpperCamelCase_: Optional[int] = tuple(snake_case_ )
        UpperCamelCase_: List[Any] = get_pairs(snake_case_ )
        if not pairs:
            return token
        while True:
            # NOTE(review): the lambda parameter is mangled (``snake_case_`` vs the
            # body's ``kv``) — intended ``key=lambda kv: kv[1]`` (lowest merge rank).
            UpperCamelCase_: Any = min(snake_case_ , key=lambda snake_case_ : self.bpe_ranks.get(snake_case_ , float("""inf""" ) ) )
            if bigram not in self.bpe_ranks:
                break
            UpperCamelCase_: int = bigram
            UpperCamelCase_: Optional[Any] = []
            UpperCamelCase_: Dict = 0
            while i < len(snake_case_ ):
                try:
                    UpperCamelCase_: Union[str, Any] = word.index(snake_case_ , snake_case_ )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    UpperCamelCase_: Optional[Any] = j
                if word[i] == first and i < len(snake_case_ ) - 1 and word[i + 1] == second:
                    # Merge the matched pair into a single symbol.
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            UpperCamelCase_: Any = tuple(snake_case_ )
            UpperCamelCase_: Tuple = new_word
            if len(snake_case_ ) == 1:
                break
            else:
                UpperCamelCase_: Dict = get_pairs(snake_case_ )
        UpperCamelCase_: Optional[Any] = """ """.join(snake_case_ )
        UpperCamelCase_: Optional[int] = word
        return word

    # Intended: _tokenize(text) — regex-split, byte-encode, then BPE each piece.
    def lowerCAmelCase__ ( self : Tuple , snake_case_ : int ):
        UpperCamelCase_: Tuple = []
        for token in re.findall(self.pat , snake_case_ ):
            UpperCamelCase_: Union[str, Any] = """""".join(
                self.byte_encoder[b] for b in token.encode("""utf-8""" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(snake_case_ ).split(""" """ ) )
        return bpe_tokens

    # Intended: _convert_token_to_id — unknown tokens fall back to unk.
    def lowerCAmelCase__ ( self : List[str] , snake_case_ : Dict ):
        return self.encoder.get(snake_case_ , self.encoder.get(self.unk_token ) )

    # Intended: _convert_id_to_token.
    def lowerCAmelCase__ ( self : Tuple , snake_case_ : str ):
        return self.decoder.get(snake_case_ )

    # Intended: convert_tokens_to_string — undo the byte-to-unicode mapping.
    def lowerCAmelCase__ ( self : Any , snake_case_ : List[str] ):
        UpperCamelCase_: str = """""".join(snake_case_ )
        UpperCamelCase_: Tuple = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
        return text

    # Intended: save_vocabulary(save_directory, filename_prefix=None) — write
    # vocab.json and merges.txt; returns (vocab_file, merge_file).
    def lowerCAmelCase__ ( self : str , snake_case_ : str , snake_case_ : Optional[str] = None ):
        if not os.path.isdir(snake_case_ ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        UpperCamelCase_: Optional[Any] = os.path.join(
            snake_case_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        UpperCamelCase_: int = os.path.join(
            snake_case_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
        with open(snake_case_ , """w""" , encoding="""utf-8""" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=snake_case_ , ensure_ascii=snake_case_ ) + """\n""" )
        UpperCamelCase_: List[Any] = 0
        with open(snake_case_ , """w""" , encoding="""utf-8""" ) as writer:
            writer.write("""#version: 0.2\n""" )
            # NOTE(review): lambda parameter mangled again — intended ``lambda kv: kv[1]``
            # (sort merges by rank so line number encodes priority).
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda snake_case_ : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        """ Please check that the tokenizer is not corrupted!""" )
                    UpperCamelCase_: Optional[int] = token_index
                writer.write(""" """.join(snake_case_ ) + """\n""" )
                index += 1
        return vocab_file, merge_file

    # Intended: build_inputs_with_special_tokens — <s> A </s> (</s> B </s> for pairs).
    def lowerCAmelCase__ ( self : Any , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ):
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        UpperCamelCase_: Tuple = [self.cls_token_id]
        UpperCamelCase_: Optional[Any] = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    # Intended: get_special_tokens_mask — 1 marks special tokens, 0 sequence tokens.
    def lowerCAmelCase__ ( self : Optional[int] , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None , snake_case_ : bool = False ):
        if already_has_special_tokens:
            # NOTE(review): keyword ``token_ids_a`` is repeated below (mangled) —
            # intended ``token_ids_0=...`` / ``token_ids_1=...``.
            return super().get_special_tokens_mask(
                token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
        if token_ids_a is None:
            return [1] + ([0] * len(snake_case_ )) + [1]
        return [1] + ([0] * len(snake_case_ )) + [1, 1] + ([0] * len(snake_case_ )) + [1]

    # Intended: create_token_type_ids_from_sequences — BART uses all zeros.
    def lowerCAmelCase__ ( self : Tuple , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ):
        UpperCamelCase_: Optional[int] = [self.sep_token_id]
        UpperCamelCase_: Optional[int] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    # Intended: prepare_for_tokenization — optionally prepend a space so the first
    # word is tokenized consistently with mid-sentence words.
    def lowerCAmelCase__ ( self : str , snake_case_ : List[str] , snake_case_ : Any=False , **snake_case_ : Optional[Any] ):
        UpperCamelCase_: Tuple = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(snake_case_ ) > 0 and not text[0].isspace()):
            UpperCamelCase_: Tuple = """ """ + text
        return (text, kwargs)
| 356 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
lowerCamelCase_ : Optional[Any] = datasets.utils.logging.get_logger(__name__)
@dataclass
class _UpperCamelCase ( datasets.BuilderConfig ):
    """BuilderConfig for the JSON loader used by the builder class below.

    NOTE(review): all attributes are declared under the same mangled name
    ``__UpperCamelCase``, so as written only the final binding survives. Judging by
    the sibling builder's usage, the intended fields are: features, encoding
    ("utf-8"), encoding_errors, field, use_threads (deprecated), block_size
    (deprecated), chunksize (10 MB default) and newlines_in_values.
    """

    __UpperCamelCase : Optional[datasets.Features] = None
    __UpperCamelCase : str = "utf-8"
    __UpperCamelCase : Optional[str] = None
    __UpperCamelCase : Optional[str] = None
    __UpperCamelCase : bool = True  # deprecated
    __UpperCamelCase : Optional[int] = None  # deprecated
    __UpperCamelCase : int = 10 << 20  # 10MB
    __UpperCamelCase : Optional[bool] = None
class _UpperCamelCase ( datasets.ArrowBasedBuilder ):
    """Arrow-based dataset builder that streams JSON / JSON-Lines files into
    ``pyarrow`` tables.

    NOTE(review): machine-mangled — locals are bound to ``UpperCamelCase_`` but read
    back under their intended names (``data_files``, ``files``, ``splits``,
    ``dataset``, ``keys``, ``batch``, ``block_size``, ``batch_idx``, ...), and all
    methods share the name ``lowerCAmelCase__`` so later definitions shadow earlier
    ones. Comments document the evident intent.
    """

    __UpperCamelCase : Tuple = JsonConfig

    # Intended: _info() — warn about / reject deprecated config options and expose features.
    def lowerCAmelCase__ ( self : int ):
        if self.config.block_size is not None:
            logger.warning("""The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead""" )
            # Intended: self.config.chunksize = self.config.block_size
            UpperCamelCase_: List[str] = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                """The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.""" )
        if self.config.newlines_in_values is not None:
            raise ValueError("""The JSON loader parameter `newlines_in_values` is no longer supported""" )
        return datasets.DatasetInfo(features=self.config.features )

    # Intended: _split_generators(dl_manager) — resolve data_files into per-split generators.
    def lowerCAmelCase__ ( self : Dict , snake_case_ : str ):
        if not self.config.data_files:
            raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
        UpperCamelCase_: Dict = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(snake_case_ , (str, list, tuple) ):
            # Unnamed files: everything becomes the TRAIN split.
            UpperCamelCase_: List[Any] = data_files
            if isinstance(snake_case_ , snake_case_ ):
                UpperCamelCase_: str = [files]
            UpperCamelCase_: Any = [dl_manager.iter_files(snake_case_ ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
        UpperCamelCase_: Dict = []
        for split_name, files in data_files.items():
            if isinstance(snake_case_ , snake_case_ ):
                UpperCamelCase_: Tuple = [files]
            UpperCamelCase_: Optional[int] = [dl_manager.iter_files(snake_case_ ) for file in files]
            splits.append(datasets.SplitGenerator(name=snake_case_ , gen_kwargs={"""files""": files} ) )
        return splits

    # Intended: _cast_table(pa_table) — align a raw table with the declared features.
    def lowerCAmelCase__ ( self : str , snake_case_ : pa.Table ):
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features ) - set(pa_table.column_names ):
                UpperCamelCase_: Union[str, Any] = self.config.features.arrow_schema.field(snake_case_ ).type
                UpperCamelCase_: Tuple = pa_table.append_column(snake_case_ , pa.array([None] * len(snake_case_ ) , type=snake_case_ ) )
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            UpperCamelCase_: int = table_cast(snake_case_ , self.config.features.arrow_schema )
        return pa_table

    # Intended: _generate_tables(files) — yield (key, pa.Table) pairs, either from a
    # single JSON document (config.field) or from chunked JSON-Lines parsing.
    def lowerCAmelCase__ ( self : Dict , snake_case_ : Optional[Any] ):
        for file_idx, file in enumerate(itertools.chain.from_iterable(snake_case_ ) ):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(snake_case_ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                    UpperCamelCase_: Dict = json.load(snake_case_ )
                # We keep only the field we are interested in
                UpperCamelCase_: Optional[int] = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(snake_case_ , (list, tuple) ):
                    UpperCamelCase_: Optional[int] = set().union(*[row.keys() for row in dataset] )
                    UpperCamelCase_: int = {col: [row.get(snake_case_ ) for row in dataset] for col in keys}
                else:
                    UpperCamelCase_: Optional[int] = dataset
                UpperCamelCase_: List[str] = pa.Table.from_pydict(snake_case_ )
                yield file_idx, self._cast_table(snake_case_ )
            # If the file has one json object per line
            else:
                with open(snake_case_ , """rb""" ) as f:
                    UpperCamelCase_: Optional[int] = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    UpperCamelCase_: Optional[int] = max(self.config.chunksize // 32 , 16 << 10 )
                    UpperCamelCase_: Tuple = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else """strict"""
                    )
                    while True:
                        UpperCamelCase_: int = f.read(self.config.chunksize )
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(snake_case_ )
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            UpperCamelCase_: Tuple = batch.decode(self.config.encoding , errors=snake_case_ ).encode("""utf-8""" )
                        try:
                            while True:
                                try:
                                    UpperCamelCase_: Tuple = paj.read_json(
                                        io.BytesIO(snake_case_ ) , read_options=paj.ReadOptions(block_size=snake_case_ ) )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(snake_case_ , pa.ArrowInvalid )
                                        and "straddling" not in str(snake_case_ )
                                        or block_size > len(snake_case_ )
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f'''Batch of {len(snake_case_ )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.''' )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            # Not valid JSON-Lines: fall back to parsing the whole file as one document.
                            try:
                                with open(
                                    snake_case_ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                                    UpperCamelCase_: Optional[Any] = json.load(snake_case_ )
                            except json.JSONDecodeError:
                                logger.error(f'''Failed to read file \'{file}\' with error {type(snake_case_ )}: {e}''' )
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(snake_case_ , snake_case_ ):  # list is the only sequence type supported in JSON
                                try:
                                    UpperCamelCase_: Any = set().union(*[row.keys() for row in dataset] )
                                    UpperCamelCase_: List[str] = {col: [row.get(snake_case_ ) for row in dataset] for col in keys}
                                    UpperCamelCase_: int = pa.Table.from_pydict(snake_case_ )
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f'''Failed to read file \'{file}\' with error {type(snake_case_ )}: {e}''' )
                                    raise ValueError(f'''Not able to read records in the JSON file at {file}.''' ) from None
                                yield file_idx, self._cast_table(snake_case_ )
                                break
                            else:
                                logger.error(f'''Failed to read file \'{file}\' with error {type(snake_case_ )}: {e}''' )
                                raise ValueError(
                                    f'''Not able to read records in the JSON file at {file}. '''
                                    f'''You should probably indicate the field of the JSON file containing your records. '''
                                    f'''This JSON file contain the following fields: {str(list(dataset.keys() ) )}. '''
                                    f'''Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ''' ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(snake_case_ )
                        batch_idx += 1
| 223 | 0 |
from __future__ import annotations
import math
def A ( depth , node_index , scores , height , is_max ) -> int:
    """Minimax over a perfect binary game tree whose leaves hold ``scores``.

    Args:
        depth: current depth in the tree (0 for the root).
        node_index: index of the current node within its level.
        scores: leaf payoffs, one per leaf of the complete tree.
        height: depth at which the leaves live (log2 of the leaf count; a float
            from ``math.log`` compares fine against the integer depth).
        is_max: True when the current level belongs to the maximising player.

    Returns:
        The game value at (depth, node_index) under optimal alternating play.

    Raises:
        ValueError: if ``depth`` is negative or ``scores`` is empty.

    NOTE(review): the original definition repeated one mangled parameter name five
    times (a SyntaxError) and recursed through the undefined global ``minimax``.
    The recursion now goes through a local helper, so the function also survives
    the later top-level ``def A`` in this module shadowing its global name.
    """
    if depth < 0:
        raise ValueError("Depth cannot be less than 0" )
    if not scores:
        raise ValueError("Scores cannot be empty" )

    def _solve(cur_depth , idx , maximizing ):
        # Leaves hold the raw payoff.
        if cur_depth == height:
            return scores[idx]
        # Players alternate turns; children of node idx are 2*idx and 2*idx + 1.
        left = _solve(cur_depth + 1 , idx * 2 , not maximizing )
        right = _solve(cur_depth + 1 , idx * 2 + 1 , not maximizing )
        return max(left , right ) if maximizing else min(left , right )

    return _solve(depth , node_index , is_max )
def A ( ) -> None:
    """Intended demo driver: print the optimal minimax value for a sample score list.

    NOTE(review): mangled and not runnable as written — results are bound to
    ``lowerCamelCase`` but the later lines read the undefined names
    ``_SCREAMING_SNAKE_CASE`` and ``minimax``, and this definition reuses the name
    ``A`` already taken by the minimax function above, shadowing it.
    """
    lowerCamelCase : Any = [90, 23, 6, 33, 21, 65, 123, 3_4423]
    # Tree height = log2(number of leaves); intended to be passed to minimax.
    lowerCamelCase : Tuple = math.log(len(_SCREAMING_SNAKE_CASE ) ,2 )
    print(f'''Optimal value : {minimax(0 ,0 ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )}''' )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): ``main`` is not defined in this module — the driver above is named ``A``.
    main()
| 48 |
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
SCREAMING_SNAKE_CASE__ : Dict = logging.getLogger(__name__)
class UpperCamelCase__ (lowerCAmelCase__ ):
    """PyTorch-Lightning module for GLUE sequence classification (fine-tunes a
    transformers model via the shared ``BaseTransformer``-style base class).

    NOTE(review): machine-mangled — most methods are named ``_lowercase`` (so later
    definitions shadow earlier ones) and locals are bound to ``lowerCamelCase`` but
    read back under their intended names (``hparams``, ``args``, ``processor``,
    ``outputs``, ``loss``, ``features``, ``results``, ...). Comments document the
    evident intent of each method.
    """

    lowerCamelCase_ : Optional[int] = """sequence-classification"""

    # Intended: accept either a dict or a Namespace of hyper-parameters.
    def __init__( self , UpperCamelCase__ ) -> List[Any]:
        if type(UpperCamelCase__ ) == dict:
            lowerCamelCase : int = Namespace(**UpperCamelCase__ )
        lowerCamelCase : str = glue_output_modes[hparams.task]
        lowerCamelCase : int = glue_tasks_num_labels[hparams.task]
        super().__init__(UpperCamelCase__ , UpperCamelCase__ , self.mode )

    # Intended: forward — delegate to the wrapped transformers model.
    def _lowercase ( self , **UpperCamelCase__ ) -> Tuple:
        return self.model(**UpperCamelCase__ )

    # Intended: training_step(batch, batch_idx) — compute loss and log the LR.
    def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
        lowerCamelCase : Union[str, Any] = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            # Token-type ids only exist for BERT-family models.
            lowerCamelCase : List[str] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
        lowerCamelCase : Optional[int] = self(**UpperCamelCase__ )
        lowerCamelCase : Union[str, Any] = outputs[0]
        lowerCamelCase : str = self.trainer.lr_schedulers[0]["scheduler"]
        lowerCamelCase : Optional[int] = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    # Intended: prepare_data — featurize train/dev examples once and cache to disk.
    def _lowercase ( self ) -> str:
        lowerCamelCase : Any = self.hparams
        lowerCamelCase : Union[str, Any] = processors[args.task]()
        lowerCamelCase : Optional[int] = processor.get_labels()
        for mode in ["train", "dev"]:
            lowerCamelCase : Optional[Any] = self._feature_file(UpperCamelCase__ )
            if os.path.exists(UpperCamelCase__ ) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s" , UpperCamelCase__ )
            else:
                logger.info("Creating features from dataset file at %s" , args.data_dir )
                lowerCamelCase : List[str] = (
                    processor.get_dev_examples(args.data_dir )
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir )
                )
                lowerCamelCase : Dict = convert_examples_to_features(
                    UpperCamelCase__ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
                logger.info("Saving features into cached file %s" , UpperCamelCase__ )
                torch.save(UpperCamelCase__ , UpperCamelCase__ )

    # Intended: get_dataloader(mode, batch_size, shuffle=False) from cached features.
    def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False ) -> DataLoader:
        lowerCamelCase : str = "dev" if mode == "test" else mode
        lowerCamelCase : int = self._feature_file(UpperCamelCase__ )
        logger.info("Loading features from cached file %s" , UpperCamelCase__ )
        lowerCamelCase : str = torch.load(UpperCamelCase__ )
        lowerCamelCase : List[str] = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
        lowerCamelCase : str = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
        lowerCamelCase : List[str] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
        if self.hparams.glue_output_mode == "classification":
            lowerCamelCase : Any = torch.tensor([f.label for f in features] , dtype=torch.long )
        elif self.hparams.glue_output_mode == "regression":
            lowerCamelCase : Union[str, Any] = torch.tensor([f.label for f in features] , dtype=torch.float )
        return DataLoader(
            TensorDataset(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , batch_size=UpperCamelCase__ , shuffle=UpperCamelCase__ , )

    # Intended: validation_step — return per-batch loss, logits and gold labels.
    def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]:
        lowerCamelCase : Dict = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            lowerCamelCase : Tuple = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
        lowerCamelCase : Dict = self(**UpperCamelCase__ )
        lowerCamelCase , lowerCamelCase : Any = outputs[:2]
        lowerCamelCase : Union[str, Any] = logits.detach().cpu().numpy()
        lowerCamelCase : Optional[Any] = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    # Intended: _eval_end(outputs) — aggregate step outputs into GLUE metrics.
    def _lowercase ( self , UpperCamelCase__ ) -> tuple:
        lowerCamelCase : Union[str, Any] = torch.stack([x["val_loss"] for x in outputs] ).mean().detach().cpu().item()
        lowerCamelCase : Optional[int] = np.concatenate([x["pred"] for x in outputs] , axis=0 )
        if self.hparams.glue_output_mode == "classification":
            lowerCamelCase : Union[str, Any] = np.argmax(UpperCamelCase__ , axis=1 )
        elif self.hparams.glue_output_mode == "regression":
            lowerCamelCase : str = np.squeeze(UpperCamelCase__ )
        lowerCamelCase : List[Any] = np.concatenate([x["target"] for x in outputs] , axis=0 )
        lowerCamelCase : List[str] = [[] for _ in range(out_label_ids.shape[0] )]
        lowerCamelCase : Optional[int] = [[] for _ in range(out_label_ids.shape[0] )]
        lowerCamelCase : Dict = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task , UpperCamelCase__ , UpperCamelCase__ )}
        lowerCamelCase : List[str] = dict(results.items() )
        lowerCamelCase : Optional[int] = results
        return ret, preds_list, out_label_list

    # Intended: validation_epoch_end.
    def _lowercase ( self , UpperCamelCase__ ) -> dict:
        lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = self._eval_end(UpperCamelCase__ )
        lowerCamelCase : str = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    # Intended: test_epoch_end.
    def _lowercase ( self , UpperCamelCase__ ) -> dict:
        lowerCamelCase , lowerCamelCase , lowerCamelCase : str = self._eval_end(UpperCamelCase__ )
        lowerCamelCase : str = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    # Intended: add_model_specific_args(parser, root_dir) — extend the CLI parser.
    @staticmethod
    def _lowercase ( UpperCamelCase__ , UpperCamelCase__ ) -> int:
        BaseTransformer.add_model_specific_args(UpperCamelCase__ , UpperCamelCase__ )
        parser.add_argument(
            "--max_seq_length" , default=128 , type=UpperCamelCase__ , help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ) , )
        parser.add_argument(
            "--task" , default="" , type=UpperCamelCase__ , required=UpperCamelCase__ , help="The GLUE task to run" , )
        parser.add_argument(
            "--gpus" , default=0 , type=UpperCamelCase__ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
        parser.add_argument(
            "--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
        return parser
def A ( ) -> int:
    """Command-line entry point: build the argument parser, train a GLUE model and,
    when ``--do_predict`` is set, evaluate the most recent checkpoint.

    Returns:
        The trainer's test output when ``--do_predict`` is set, else ``None``.

    NOTE(review): the original bound every result to the placeholder name
    ``lowerCamelCase`` while reading ``args``/``parser``/``model``/``trainer``/
    ``checkpoints``, and passed the undefined ``_SCREAMING_SNAKE_CASE`` where the
    upstream script uses the parser/args/model objects and ``recursive=True``.
    ``GLUETransformer`` is referenced as in the original; the class in this module
    is mangled to ``UpperCamelCase__`` — TODO confirm the intended binding.
    """
    parser = argparse.ArgumentParser()
    add_generic_args(parser , os.getcwd() )
    parser = GLUETransformer.add_model_specific_args(parser , os.getcwd() )
    args = parser.parse_args()
    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results" , f'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' , )
        os.makedirs(args.output_dir )
    model = GLUETransformer(args )
    trainer = generic_train(model , args )
    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        # Use the most recent checkpoint written during training.
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir , "checkpoint-epoch=*.ckpt" ) , recursive=True ) )
        model = model.load_from_checkpoint(checkpoints[-1] )
        return trainer.test(model )
if __name__ == "__main__":
    # NOTE(review): ``main`` is not defined in this module — the entry point above
    # is mangled to ``A``; as written this guard raises NameError.
    main()
| 48 | 1 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class a ( unittest.TestCase ):
    """Slow JAX/Flax integration tests for ``FlaxStableDiffusionControlNetPipeline``,
    comparing a fixed output slice against reference pixel values.

    NOTE(review): machine-mangled — every result is bound to ``lowerCAmelCase`` and
    read back under other names (``pipe``, ``params``, ``images``, ...), boolean /
    seed arguments are passed as the undefined name ``_snake_case``, and all three
    methods share the name ``UpperCamelCase__`` so only the last survives.
    """

    def UpperCamelCase__ ( self ):
        """Intended tearDown: release memory between tests."""
        super().tearDown()
        gc.collect()

    def UpperCamelCase__ ( self ):
        """Canny-edge ControlNet: end-to-end generation, checked against a pixel slice."""
        lowerCAmelCase ,lowerCAmelCase = FlaxControlNetModel.from_pretrained(
            'lllyasviel/sd-controlnet-canny' , from_pt=_snake_case , dtype=jnp.bfloataa )
        lowerCAmelCase ,lowerCAmelCase = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5' , controlnet=_snake_case , from_pt=_snake_case , dtype=jnp.bfloataa )
        lowerCAmelCase = controlnet_params
        lowerCAmelCase = 'bird'
        lowerCAmelCase = jax.device_count()
        lowerCAmelCase = pipe.prepare_text_inputs([prompts] * num_samples )
        lowerCAmelCase = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' )
        lowerCAmelCase = pipe.prepare_image_inputs([canny_image] * num_samples )
        # Deterministic seed, split and replicated across local devices for pmap.
        lowerCAmelCase = jax.random.PRNGKey(0 )
        lowerCAmelCase = jax.random.split(_snake_case , jax.device_count() )
        lowerCAmelCase = replicate(_snake_case )
        lowerCAmelCase = shard(_snake_case )
        lowerCAmelCase = shard(_snake_case )
        lowerCAmelCase = pipe(
            prompt_ids=_snake_case , image=_snake_case , params=_snake_case , prng_seed=_snake_case , num_inference_steps=50 , jit=_snake_case , ).images
        assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
        lowerCAmelCase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        lowerCAmelCase = images[0, 2_53:2_56, 2_53:2_56, -1]
        lowerCAmelCase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        lowerCAmelCase = jnp.array(
            [0.167_969, 0.116_699, 0.081_543, 0.154_297, 0.132_812, 0.108_887, 0.169_922, 0.169_922, 0.205_078] )
        print(F'output_slice: {output_slice}' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2

    def UpperCamelCase__ ( self ):
        """OpenPose ControlNet: same pipeline flow with a pose conditioning image."""
        lowerCAmelCase ,lowerCAmelCase = FlaxControlNetModel.from_pretrained(
            'lllyasviel/sd-controlnet-openpose' , from_pt=_snake_case , dtype=jnp.bfloataa )
        lowerCAmelCase ,lowerCAmelCase = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5' , controlnet=_snake_case , from_pt=_snake_case , dtype=jnp.bfloataa )
        lowerCAmelCase = controlnet_params
        lowerCAmelCase = 'Chef in the kitchen'
        lowerCAmelCase = jax.device_count()
        lowerCAmelCase = pipe.prepare_text_inputs([prompts] * num_samples )
        lowerCAmelCase = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' )
        lowerCAmelCase = pipe.prepare_image_inputs([pose_image] * num_samples )
        lowerCAmelCase = jax.random.PRNGKey(0 )
        lowerCAmelCase = jax.random.split(_snake_case , jax.device_count() )
        lowerCAmelCase = replicate(_snake_case )
        lowerCAmelCase = shard(_snake_case )
        lowerCAmelCase = shard(_snake_case )
        lowerCAmelCase = pipe(
            prompt_ids=_snake_case , image=_snake_case , params=_snake_case , prng_seed=_snake_case , num_inference_steps=50 , jit=_snake_case , ).images
        assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
        lowerCAmelCase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        lowerCAmelCase = images[0, 2_53:2_56, 2_53:2_56, -1]
        lowerCAmelCase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        lowerCAmelCase = jnp.array(
            [[0.271_484, 0.261_719, 0.275_391, 0.277_344, 0.279_297, 0.291_016, 0.294_922, 0.302_734, 0.302_734]] )
        print(F'output_slice: {output_slice}' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 309 |
"""simple docstring"""
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
@dataclass
class a :
    """Intended GLUE data-training arguments: task name, data directory,
    max_seq_length and overwrite_cache.

    NOTE(review): all fields are declared under the same mangled name
    ``snake_case__``, so as written only the last binding survives.
    """

    snake_case__ = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(glue_processors.keys() )} )
    snake_case__ = field(
        metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} )
    snake_case__ = field(
        default=1_2_8 , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    snake_case__ = field(
        default=a__ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )

    def UpperCamelCase__ ( self ):
        """Intended __post_init__: normalise the task name to lower case."""
        lowerCAmelCase = self.task_name.lower()
class a ( a__ ):
    """Intended dataset-split enum with train/dev/test members.

    NOTE(review): the three members all share the mangled name ``snake_case__`` —
    only the last binding ('test') survives as written.
    """
    snake_case__ = '''train'''
    snake_case__ = '''dev'''
    snake_case__ = '''test'''
class a ( a__ ):
    """Deprecated torch ``Dataset`` that tokenizes GLUE examples and caches the
    resulting features on disk (guarded by a ``FileLock`` for distributed runs).

    NOTE(review): machine-mangled — ``__init__`` repeats the parameter name
    ``_snake_case`` five times (a SyntaxError as written; intended signature is
    ``(args, tokenizer, limit_length=None, mode=Split.train, cache_dir=None)``),
    and locals are bound to ``lowerCAmelCase`` but read back under intended names
    (``args``, ``mode``, ``label_list``, ``start``, ``examples``).
    """

    snake_case__ = 42
    snake_case__ = 42
    snake_case__ = 42

    def __init__( self , _snake_case , _snake_case , _snake_case = None , _snake_case = Split.train , _snake_case = None , ):
        """Load cached features for the requested split, or build and cache them."""
        warnings.warn(
            'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '
            'library. You can have a look at this example script for pointers: '
            'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' , _snake_case , )
        lowerCAmelCase = args
        lowerCAmelCase = glue_processors[args.task_name]()
        lowerCAmelCase = glue_output_modes[args.task_name]
        if isinstance(_snake_case , _snake_case ):
            try:
                lowerCAmelCase = Split[mode]
            except KeyError:
                raise KeyError('mode is not a valid split name' )
        # Load data features from cache or dataset file
        lowerCAmelCase = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , )
        lowerCAmelCase = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            lowerCAmelCase ,lowerCAmelCase = label_list[2], label_list[1]
        lowerCAmelCase = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lowerCAmelCase = cached_features_file + '.lock'
        with FileLock(_snake_case ):
            if os.path.exists(_snake_case ) and not args.overwrite_cache:
                lowerCAmelCase = time.time()
                lowerCAmelCase = torch.load(_snake_case )
                logger.info(
                    F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
            else:
                logger.info(F'Creating features from dataset file at {args.data_dir}' )
                if mode == Split.dev:
                    lowerCAmelCase = self.processor.get_dev_examples(args.data_dir )
                elif mode == Split.test:
                    lowerCAmelCase = self.processor.get_test_examples(args.data_dir )
                else:
                    lowerCAmelCase = self.processor.get_train_examples(args.data_dir )
                if limit_length is not None:
                    lowerCAmelCase = examples[:limit_length]
                lowerCAmelCase = glue_convert_examples_to_features(
                    _snake_case , _snake_case , max_length=args.max_seq_length , label_list=_snake_case , output_mode=self.output_mode , )
                lowerCAmelCase = time.time()
                torch.save(self.features , _snake_case )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )

    def __len__( self ):
        """Number of cached feature records."""
        return len(self.features )

    def __getitem__( self , _snake_case ):
        """Return the i-th ``InputFeatures`` record."""
        return self.features[i]

    def UpperCamelCase__ ( self ):
        """Intended get_labels(): the label list for the configured task."""
        return self.label_list
| 309 | 1 |
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
# Registry of released Dance Diffusion checkpoints: download URL plus the
# audio sample rate / sample size each model was trained with.
UpperCAmelCase_ = {
    """gwf-440k""": {
        """url""": """https://model-server.zqevans2.workers.dev/gwf-440k.ckpt""",
        """sample_rate""": 48_000,
        """sample_size""": 65_536,
    },
    """jmann-small-190k""": {
        """url""": """https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt""",
        """sample_rate""": 48_000,
        """sample_size""": 65_536,
    },
    """jmann-large-580k""": {
        """url""": """https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt""",
        """sample_rate""": 48_000,
        """sample_size""": 131_072,
    },
    """maestro-uncond-150k""": {
        """url""": """https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt""",
        """sample_rate""": 16_000,
        """sample_size""": 65_536,
    },
    """unlocked-uncond-250k""": {
        """url""": """https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt""",
        """sample_rate""": 16_000,
        """sample_size""": 65_536,
    },
    """honk-140k""": {
        """url""": """https://model-server.zqevans2.workers.dev/honk-140k.ckpt""",
        """sample_rate""": 16_000,
        """sample_size""": 65_536,
    },
}
def lowerCamelCase__ ( alpha, sigma ):
    """Convert an (alpha, sigma) noise pair into a continuous timestep.

    Defects fixed: both parameters were named `A__` (duplicate argument names
    are a SyntaxError), the body referenced the undefined `snake_case__`, and
    `torch.atana` does not exist — the intended call is `torch.atan2`.
    t = atan2(sigma, alpha) * 2 / pi, so t lies in [0, 1] for
    non-negative alpha/sigma.
    """
    return torch.atan2(sigma, alpha) / math.pi * 2
def lowerCamelCase__ ( t ):
    """Map a linear schedule `t` in [0, 1] onto the "crash" diffusion schedule.

    Defects fixed: the body referenced the undefined `snake_case__` and the
    intermediate assignments had lost their target names (sigma, alpha).
    Expects an `alpha_sigma_to_t` helper in scope (the atan2-based converter).
    """
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
class lowerCamelCase__( UpperCAmelCase__):
    # Bare namespace object: the conversion entry point attaches arbitrary
    # attributes (sample_size, sample_rate, ...) to fake an argparse config.
    pass
class lowerCamelCase__( nn.Module):
    """Unconditional diffusion wrapper mirroring the audio-diffusion training module.

    Holds the raw diffusion net, its EMA copy (the weights the released
    checkpoints store and that we convert), and the Sobol engine used during
    training. Defects fixed: the attribute assignments had lost their targets
    (the body itself reads `self.diffusion`) and the parameter name did not
    match the name used in the body.
    """

    def __init__(self, global_args):
        super().__init__()
        self.diffusion = DiffusionAttnUnetaD(global_args, n_attn_layers=4)
        # The EMA copy is what `main` exports (`orig_model.diffusion_ema`).
        self.diffusion_ema = deepcopy(self.diffusion)
        # NOTE(review): upstream uses scramble=True here — confirm.
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)
def lowerCamelCase__ ( model_name ):
    """Download the official checkpoint for `model_name` into the CWD.

    Defect fixed: the parameter was named `A__` while the body read
    `model_name`. Returns the local path of the downloaded .ckpt file.
    Requires `wget` on PATH and a writable working directory.
    """
    url = MODELS_MAP[model_name]["""url"""]
    os.system(f'wget {url} ./' )
    return f'./{model_name}.ckpt'
# Layer-number -> sub-module name tables used by the key-renaming helpers
# below. Defect fixed: all six tables were bound to the same name
# (`UpperCAmelCase_`), each assignment clobbering the previous one, while the
# helpers reference DOWN_NUM_TO_LAYER / UP_NUM_TO_LAYER / MID_NUM_TO_LAYER /
# DEPTH_0_TO_LAYER / RES_CONV_MAP / ATTN_MAP — restore those names.

# Down-block interior (layer numbers 1-6).
DOWN_NUM_TO_LAYER = {
    """1""": """resnets.0""",
    """2""": """attentions.0""",
    """3""": """resnets.1""",
    """4""": """attentions.1""",
    """5""": """resnets.2""",
    """6""": """attentions.2""",
}
# Up-block interior (layer numbers 8-13).
UP_NUM_TO_LAYER = {
    """8""": """resnets.0""",
    """9""": """attentions.0""",
    """10""": """resnets.1""",
    """11""": """attentions.1""",
    """12""": """resnets.2""",
    """13""": """attentions.2""",
}
# Mid-block interior (both halves, layer numbers 1-13).
MID_NUM_TO_LAYER = {
    """1""": """resnets.0""",
    """2""": """attentions.0""",
    """3""": """resnets.1""",
    """4""": """attentions.1""",
    """5""": """resnets.2""",
    """6""": """attentions.2""",
    """8""": """resnets.3""",
    """9""": """attentions.3""",
    """10""": """resnets.4""",
    """11""": """attentions.4""",
    """12""": """resnets.5""",
    """13""": """attentions.5""",
}
# Depth-0 blocks (no attention layers).
DEPTH_0_TO_LAYER = {
    """0""": """resnets.0""",
    """1""": """resnets.1""",
    """2""": """resnets.2""",
    """4""": """resnets.0""",
    """5""": """resnets.1""",
    """6""": """resnets.2""",
}
# ResConvBlock sub-parameter renames.
RES_CONV_MAP = {
    """skip""": """conv_skip""",
    """main.0""": """conv_1""",
    """main.1""": """group_norm_1""",
    """main.3""": """conv_2""",
    """main.4""": """group_norm_2""",
}
# SelfAttention1d sub-parameter renames; fused qkv fans out to three targets.
ATTN_MAP = {
    """norm""": """group_norm""",
    """qkv_proj""": ["""query""", """key""", """value"""],
    """out_proj""": ["""proj_attn"""],
}
def lowerCamelCase__ ( name ):
    """Translate a ResConvBlock parameter name to its diffusers equivalent.

    Defect fixed: the parameter was named `A__` while the body read `name`.
    `name` must start with "skip" or "main.<digit>"; the 6-char prefix
    ("main.0", "main.1", ...) indexes RES_CONV_MAP.

    Raises:
        ValueError: if `name` matches neither expected prefix.
    """
    if name.startswith("""skip""" ):
        return name.replace("""skip""" , RES_CONV_MAP["""skip"""] )
    # name has to be of format main.{digit}
    if not name.startswith("""main.""" ):
        raise ValueError(f'ResConvBlock error with {name}' )
    return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def lowerCamelCase__ ( name ):
    """Translate a SelfAttention1d parameter name via ATTN_MAP.

    Defects fixed: the parameter was named `A__` and every identifier in the
    body had been replaced by the undefined `snake_case__`. Returns a single
    renamed key when the mapping target is a string, or a list of keys when
    one source tensor (e.g. fused qkv) fans out to several targets.

    Raises:
        ValueError: if no ATTN_MAP prefix matches.
    """
    for key, value in ATTN_MAP.items():
        if name.startswith(key ) and not isinstance(value , list ):
            return name.replace(key , value )
        elif name.startswith(key ):
            return [name.replace(key , v ) for v in value]
    raise ValueError(f'Attn error with {name}' )
def lowerCamelCase__ ( input_string, max_depth=13 ):
    """Rename one original Dance Diffusion state-dict key to diffusers naming.

    Walks the `net.3.` / `main.7.` nesting prefixes to recover the UNet depth,
    maps the leading layer number to a block (down/mid/up), then delegates the
    remaining sub-name to the res-conv / attention helpers.

    Defects fixed: both parameters were named `A__` (SyntaxError) and every
    local assignment had lost its target name; targets are restored from the
    variables the body itself reads (string, depth, layer_num, string_left,
    new_layer, prefix, new_string).

    Returns a str, or a list of strs when one key fans out (fused qkv).
    """
    string = input_string
    if string.split(""".""" )[0] == "timestep_embed":
        return string.replace("""timestep_embed""" , """time_proj""" )
    depth = 0
    if string.startswith("""net.3.""" ):
        depth += 1
        string = string[6:]
    elif string.startswith("""net.""" ):
        string = string[4:]
    while string.startswith("""main.7.""" ):
        depth += 1
        string = string[7:]
    if string.startswith("""main.""" ):
        string = string[5:]
    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]
    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = """mid_block"""
    elif depth > 0 and int(layer_num ) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f'down_blocks.{depth}'
    elif depth > 0 and int(layer_num ) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f'up_blocks.{max_depth - depth - 1}'
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f'up_blocks.{max_depth - 1}' if int(layer_num ) > 3 else """down_blocks.0"""
    if not string_left.startswith(""".""" ):
        raise ValueError(f'Naming error with {input_string} and string_left: {string_left}.' )
    string_left = string_left[1:]
    if "resnets" in new_layer:
        new_string_left = convert_resconv_naming(string_left )
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left )
    string_left = new_string_left
    if not isinstance(string_left , list ):
        new_string = prefix + """.""" + new_layer + """.""" + string_left
    else:
        new_string = [prefix + """.""" + new_layer + """.""" + s for s in string_left]
    return new_string
def lowerCamelCase__ ( state_dict ):
    """Build a diffusers-named state dict from the original checkpoint's.

    Defects fixed: the parameter was named `A__` and the per-key assignments
    had lost their targets. Skips weight-normalization `kernel` buffers and
    splits fused attention tensors when `rename` maps one key to several
    (delegating to `transform_conv_attns`).
    """
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("""kernel""" ):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k )
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k , list ):
            new_state_dict = transform_conv_attns(new_state_dict , new_k , v )
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def lowerCamelCase__ ( new_state_dict, names, v ):
    """Store tensor `v` in `new_state_dict` under one or several `names`.

    Attention parameters come from 1x1 convolutions: 3-D weights drop the
    trailing kernel axis to become Linear weights. When `names` has three
    entries the fused qkv tensor is split into three equal chunks along dim 0.

    Defects fixed: the three parameters were all named `A__` (SyntaxError)
    and the dict-store assignments had lost their `new_state_dict[...]`
    targets.
    """
    if len(names ) == 1:
        if len(v.shape ) == 3:
            # weight
            new_state_dict[names[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[names[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3 ):
            if len(v.shape ) == 3:
                new_state_dict[names[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[names[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def lowerCamelCase__ ( A__ : Tuple ):
    """Convert an original Dance Diffusion checkpoint to a diffusers pipeline.

    Downloads the checkpoint if needed, renames/reshapes all weights into a
    UNet1D-style model, then sanity-checks the diffusers pipeline output
    against the original IPLMS sampler before optionally saving.
    """
    # NOTE(review): every `__lowerCamelCase =` assignment below lost its
    # distinct target name during an automated rename (device, model_name,
    # sample_rate/sample_size, config, diffusers_model, orig_model, the
    # schedulers, noise, generated, audio, diff_sum/diff_max, ...), so later
    # references like `args`, `config.sample_size`, `steps`, `pipe`,
    # `generated`, `diff_max` no longer resolve — restore the original names
    # before this entry point can run.
    __lowerCamelCase = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
    __lowerCamelCase = args.model_path.split("""/""" )[-1].split(""".""" )[0]
    if not os.path.isfile(args.model_path ):
        assert (
            model_name == args.model_path
        ), f'Make sure to provide one of the official model names {MODELS_MAP.keys()}'
        __lowerCamelCase = download(snake_case__ )
    __lowerCamelCase = MODELS_MAP[model_name]["""sample_rate"""]
    __lowerCamelCase = MODELS_MAP[model_name]["""sample_size"""]
    # Fake argparse-style config object consumed by the original model class.
    __lowerCamelCase = Object()
    __lowerCamelCase = sample_size
    __lowerCamelCase = sample_rate
    __lowerCamelCase = 0
    __lowerCamelCase = UNetaDModel(sample_size=snake_case__ , sample_rate=snake_case__ )
    __lowerCamelCase = diffusers_model.state_dict()
    __lowerCamelCase = DiffusionUncond(snake_case__ )
    orig_model.load_state_dict(torch.load(args.model_path , map_location=snake_case__ )["""state_dict"""] )
    # The EMA weights are the ones the released checkpoints are meant to ship.
    __lowerCamelCase = orig_model.diffusion_ema.eval()
    __lowerCamelCase = orig_model.state_dict()
    __lowerCamelCase = rename_orig_weights(snake_case__ )
    # Key-set diff: renamed keys must all exist; only `kernel` buffers may be
    # present in diffusers but missing from the renamed dict.
    __lowerCamelCase = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
    __lowerCamelCase = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
    assert len(snake_case__ ) == 0, f'Problem with {renamed_minus_diffusers}'
    assert all(k.endswith("""kernel""" ) for k in list(snake_case__ ) ), f'Problem with {diffusers_minus_renamed}'
    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f'Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'
        if key == "time_proj.weight":
            __lowerCamelCase = value.squeeze()
        __lowerCamelCase = value
    diffusers_model.load_state_dict(snake_case__ )
    __lowerCamelCase = 100
    __lowerCamelCase = 33
    __lowerCamelCase = IPNDMScheduler(num_train_timesteps=snake_case__ )
    __lowerCamelCase = torch.manual_seed(snake_case__ )
    __lowerCamelCase = torch.randn([1, 2, config.sample_size] , generator=snake_case__ ).to(snake_case__ )
    __lowerCamelCase = torch.linspace(1 , 0 , steps + 1 , device=snake_case__ )[:-1]
    __lowerCamelCase = get_crash_schedule(snake_case__ )
    __lowerCamelCase = DanceDiffusionPipeline(unet=snake_case__ , scheduler=snake_case__ )
    # Same seed for both samplers so the outputs are directly comparable.
    __lowerCamelCase = torch.manual_seed(33 )
    __lowerCamelCase = pipe(num_inference_steps=snake_case__ , generator=snake_case__ ).audios
    __lowerCamelCase = sampling.iplms_sample(snake_case__ , snake_case__ , snake_case__ , {} )
    __lowerCamelCase = generated.clamp(-1 , 1 )
    __lowerCamelCase = (generated - audio).abs().sum()
    __lowerCamelCase = (generated - audio).abs().max()
    if args.save:
        pipe.save_pretrained(args.checkpoint_path )
    print("""Diff sum""" , snake_case__ )
    print("""Diff max""" , snake_case__ )
    assert diff_max < 1E-3, f'Diff max: {diff_max} is too much :-/'
    print(f'Conversion for {model_name} successful!' )
if __name__ == "__main__":
    # CLI entry point for the conversion script.
    # NOTE(review): the parser and parsed args are bound to `UpperCAmelCase_`
    # but referenced below as `parser` / `args` — the names diverged during an
    # automated rename and must be restored.
    UpperCAmelCase_ = argparse.ArgumentParser()
    parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
    # NOTE(review): argparse `type=bool` is a footgun — bool("False") is True,
    # so any non-empty value enables --save; consider a string-to-bool
    # converter or a store_true/store_false action.
    parser.add_argument(
        '--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
    )
    parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
    UpperCAmelCase_ = parser.parse_args()
    main(args)
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class __magic_name__ ( UpperCAmelCase__ ):
    """Map-style dataset that lazily applies a preprocess function per item.

    Defects fixed: `__init__` declared three parameters all named `_a`
    (duplicate argument names are a SyntaxError), the attribute assignments
    had lost their targets (the methods below read `self.dataset`,
    `self.process`, `self.params`), and `__getitem__` indexed with an
    undefined `i`.
    """

    def __init__(self, dataset, process, params):
        self.dataset = dataset
        # Callable applied to each raw item, with `params` as extra kwargs.
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed
class __magic_name__ ( UpperCAmelCase__ ):
    """Iterator that runs `infer` over items from `loader`, transparently
    unrolling batched outputs back into single batch_size=1 items.
    """
    # NOTE(review): the constructor parameters were all renamed to `_a`
    # (duplicate argument names are a SyntaxError) and the `lowerCamelCase =`
    # assignments throughout this class lost their targets (self.loader,
    # self.infer, self.params, self.loader_batch_size, self._loader_batch_data,
    # self._loader_batch_index, self.iterator, and the locals `result`, `key`,
    # `first_tensor`, `observed_batch_size`, `processed`) — restore them from
    # upstream before this class can run.
    def __init__( self , _a , _a , _a , _a=None ):
        """Wrap `loader`, applying `infer(item, **params)` to every element;
        a loader_batch_size of 1 disables unrolling entirely."""
        lowerCamelCase = loader
        lowerCamelCase = infer
        lowerCamelCase = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            lowerCamelCase = None
        lowerCamelCase = loader_batch_size
        # Internal bookkeeping
        lowerCamelCase = None
        lowerCamelCase = None
    def __len__( self ):
        """Length of the underlying loader."""
        return len(self.loader )
    def __iter__( self ):
        """Begin a fresh pass over the loader."""
        lowerCamelCase = iter(self.loader )
        return self
    def _lowerCAmelCase ( self ):
        """Slice one element out of the currently unrolled batch, reshaped to
        look like batch_size=1 for downstream compatibility."""
        if isinstance(self._loader_batch_data , torch.Tensor ):
            # Batch data is simple tensor, just fetch the slice
            lowerCamelCase = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            lowerCamelCase = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(_a , _a ):
                    # Convert ModelOutput to tuple first
                    lowerCamelCase = element.to_tuple()
                    if isinstance(element[0] , torch.Tensor ):
                        lowerCamelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
                    elif isinstance(element[0] , np.ndarray ):
                        lowerCamelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(_a , _a ):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0] , torch.Tensor ):
                        lowerCamelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
                    elif isinstance(element[0] , np.ndarray ):
                        lowerCamelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    lowerCamelCase = None
                elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    lowerCamelCase = element[self._loader_batch_index].unsqueeze(0 )
                elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    lowerCamelCase = np.expand_dims(element[self._loader_batch_index] , 0 )
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    lowerCamelCase = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            lowerCamelCase = self._loader_batch_data.__class__(_a )
        self._loader_batch_index += 1
        return result
    def _lowerCAmelCase ( self ):
        """Return the next processed item, unrolling batched outputs when
        loader_batch_size is set."""
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()
        # We're out of items within a batch
        lowerCamelCase = next(self.iterator )
        lowerCamelCase = self.infer(_a , **self.params )
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(_a , torch.Tensor ):
                lowerCamelCase = processed
            else:
                lowerCamelCase = list(processed.keys() )[0]
                lowerCamelCase = processed[key]
            if isinstance(_a , _a ):
                lowerCamelCase = len(_a )
            else:
                lowerCamelCase = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                lowerCamelCase = observed_batch_size
            # Setting internal index to unwrap the batch
            lowerCamelCase = processed
            lowerCamelCase = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
class __magic_name__ ( UpperCAmelCase__ ):
    """Flattens a generator-producing `infer`: every loader item yields a
    sub-iterator whose elements are surfaced one at a time.
    """
    # NOTE(review): the constructor parameters were all renamed to `_a`
    # (duplicate argument names are a SyntaxError) and the assignments below
    # lost their targets (self.iterator, self.subiterator, `processed`) —
    # restore the upstream names.
    def __init__( self , _a , _a , _a , _a=None ):
        """Same signature as the parent pipeline iterator."""
        super().__init__(_a , _a , _a )
    def __iter__( self ):
        """Reset the outer iterator and clear the current sub-iterator."""
        lowerCamelCase = iter(self.loader )
        lowerCamelCase = None
        return self
    def _lowerCAmelCase ( self ):
        """Return the next flattened element, advancing to the next item's
        sub-iterator on StopIteration."""
        if self.subiterator is None:
            lowerCamelCase = self.infer(next(self.iterator ) , **self.params )
        try:
            # Try to return next item
            lowerCamelCase = next(self.subiterator )
        except StopIteration:
            # When a preprocess iterator ends, we can start lookig at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            lowerCamelCase = self.infer(next(self.iterator ) , **self.params )
            lowerCamelCase = next(self.subiterator )
        return processed
class __magic_name__ ( UpperCAmelCase__ ):
    """Regroups flattened chunk items back into `process`-sized groups by
    accumulating until an item carries the `is_last` marker.
    """
    # NOTE(review): the `lowerCamelCase =` assignments below lost their
    # targets (`is_last`, `accumulator`, `item`, `processed`, `key`,
    # `first_tensor`, `observed_batch_size`, self._loader_batch_* fields) —
    # restore the upstream names before this class can run.
    def __iter__( self ):
        """Begin a fresh pass over the loader."""
        lowerCamelCase = iter(self.loader )
        return self
    def _lowerCAmelCase ( self ):
        """Accumulate (possibly unbatched) items until `is_last`, then return
        the accumulated group."""
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.
        # This iterator accumulates items (possibly while unbatching) until it
        # its a `is_last` and then just passes it on to the caller.
        lowerCamelCase = False
        lowerCamelCase = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                lowerCamelCase = self.loader_batch_item()
                lowerCamelCase = item.pop("""is_last""" )
                accumulator.append(_a )
                if is_last:
                    return accumulator
        while not is_last:
            lowerCamelCase = self.infer(next(self.iterator ) , **self.params )
            if self.loader_batch_size is not None:
                if isinstance(_a , torch.Tensor ):
                    lowerCamelCase = processed
                else:
                    lowerCamelCase = list(processed.keys() )[0]
                    lowerCamelCase = processed[key]
                if isinstance(_a , _a ):
                    lowerCamelCase = len(_a )
                else:
                    lowerCamelCase = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    lowerCamelCase = observed_batch_size
                lowerCamelCase = processed
                lowerCamelCase = 0
                while self._loader_batch_index < self.loader_batch_size:
                    lowerCamelCase = self.loader_batch_item()
                    lowerCamelCase = item.pop("""is_last""" )
                    accumulator.append(_a )
                    if is_last:
                        return accumulator
            else:
                lowerCamelCase = processed
                lowerCamelCase = item.pop("""is_last""" )
                accumulator.append(_a )
        return accumulator
class __magic_name__ ( UpperCAmelCase__ ):
    """Dataset view that yields a single field of each underlying example.

    Defects fixed: `__init__` declared two parameters both named `_a`
    (duplicate argument names are a SyntaxError), the attribute assignments
    had lost their targets, and `__getitem__` indexed with an undefined `i`.
    """

    def __init__(self, dataset, key):
        self.dataset = dataset
        # Field name extracted from every example.
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]
class __magic_name__ ( UpperCAmelCase__ ):
    """Dataset view yielding {"text", "text_pair"} dicts built from two fields.

    Defects fixed: `__init__` declared three parameters all named `_a` and
    both key attributes were named `keya`, so the second assignment clobbered
    the first; `__getitem__` also indexed with an undefined `i`.
    """

    def __init__(self, dataset, key1, key2):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
| 291 | 0 |
# Pinned dependency version table: package name -> pip requirement spec.
# Presumably auto-generated from setup.py's install_requires — confirm before
# editing by hand.
__snake_case :dict = {
    '''Pillow''': '''Pillow<10.0.0''',
    '''accelerate''': '''accelerate>=0.20.3''',
    '''av''': '''av==9.2.0''',
    '''beautifulsoup4''': '''beautifulsoup4''',
    '''black''': '''black~=23.1''',
    '''codecarbon''': '''codecarbon==1.2.0''',
    '''cookiecutter''': '''cookiecutter==1.7.3''',
    '''dataclasses''': '''dataclasses''',
    '''datasets''': '''datasets!=2.5.0''',
    '''decord''': '''decord==0.6.0''',
    '''deepspeed''': '''deepspeed>=0.9.3''',
    '''diffusers''': '''diffusers''',
    '''dill''': '''dill<0.3.5''',
    '''evaluate''': '''evaluate>=0.2.0''',
    '''fairscale''': '''fairscale>0.3''',
    '''faiss-cpu''': '''faiss-cpu''',
    '''fastapi''': '''fastapi''',
    '''filelock''': '''filelock''',
    '''flax''': '''flax>=0.4.1,<=0.7.0''',
    '''ftfy''': '''ftfy''',
    '''fugashi''': '''fugashi>=1.0''',
    '''GitPython''': '''GitPython<3.1.19''',
    '''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
    '''huggingface-hub''': '''huggingface-hub>=0.14.1,<1.0''',
    '''importlib_metadata''': '''importlib_metadata''',
    '''ipadic''': '''ipadic>=1.0.0,<2.0''',
    '''isort''': '''isort>=5.5.4''',
    '''jax''': '''jax>=0.2.8,!=0.3.2,<=0.4.13''',
    '''jaxlib''': '''jaxlib>=0.1.65,<=0.4.13''',
    '''jieba''': '''jieba''',
    '''kenlm''': '''kenlm''',
    '''keras-nlp''': '''keras-nlp>=0.3.1''',
    '''librosa''': '''librosa''',
    '''nltk''': '''nltk''',
    '''natten''': '''natten>=0.14.6''',
    '''numpy''': '''numpy>=1.17''',
    '''onnxconverter-common''': '''onnxconverter-common''',
    '''onnxruntime-tools''': '''onnxruntime-tools>=1.4.2''',
    '''onnxruntime''': '''onnxruntime>=1.4.0''',
    '''opencv-python''': '''opencv-python''',
    '''optuna''': '''optuna''',
    '''optax''': '''optax>=0.0.8,<=0.1.4''',
    '''packaging''': '''packaging>=20.0''',
    '''parameterized''': '''parameterized''',
    '''phonemizer''': '''phonemizer''',
    '''protobuf''': '''protobuf''',
    '''psutil''': '''psutil''',
    '''pyyaml''': '''pyyaml>=5.1''',
    '''pydantic''': '''pydantic<2''',
    '''pytest''': '''pytest>=7.2.0''',
    '''pytest-timeout''': '''pytest-timeout''',
    '''pytest-xdist''': '''pytest-xdist''',
    '''python''': '''python>=3.8.0''',
    '''ray[tune]''': '''ray[tune]''',
    '''regex''': '''regex!=2019.12.17''',
    '''requests''': '''requests''',
    '''rhoknp''': '''rhoknp>=1.1.0,<1.3.1''',
    '''rjieba''': '''rjieba''',
    '''rouge-score''': '''rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1''',
    '''ruff''': '''ruff>=0.0.241,<=0.0.259''',
    '''sacrebleu''': '''sacrebleu>=1.4.12,<2.0.0''',
    '''sacremoses''': '''sacremoses''',
    '''safetensors''': '''safetensors>=0.3.1''',
    '''sagemaker''': '''sagemaker>=2.31.0''',
    '''scikit-learn''': '''scikit-learn''',
    '''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
    '''sigopt''': '''sigopt''',
    '''starlette''': '''starlette''',
    '''sudachipy''': '''sudachipy>=0.6.6''',
    '''sudachidict_core''': '''sudachidict_core>=20220729''',
    '''tensorflow-cpu''': '''tensorflow-cpu>=2.6,<2.14''',
    '''tensorflow''': '''tensorflow>=2.6,<2.14''',
    '''tensorflow-text''': '''tensorflow-text<2.14''',
    '''tf2onnx''': '''tf2onnx''',
    '''timeout-decorator''': '''timeout-decorator''',
    '''timm''': '''timm''',
    '''tokenizers''': '''tokenizers>=0.11.1,!=0.11.3,<0.14''',
    '''torch''': '''torch>=1.9,!=1.12.0''',
    '''torchaudio''': '''torchaudio''',
    '''torchvision''': '''torchvision''',
    '''pyctcdecode''': '''pyctcdecode>=0.4.0''',
    '''tqdm''': '''tqdm>=4.27''',
    '''unidic''': '''unidic>=1.0.2''',
    '''unidic_lite''': '''unidic_lite>=1.0.7''',
    '''urllib3''': '''urllib3<2.0.0''',
    '''uvicorn''': '''uvicorn''',
}
| 350 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger for conversion progress messages.
# NOTE(review): the annotation says Dict but `get_logger()` returns a Logger.
__snake_case :Dict = logging.get_logger()
@dataclass
class _A :
    """Records which leaf modules of `module` actually run during a forward pass.

    A forward hook is attached to every sub-module; hooks fire on leaves
    (modules with no children) and append them to `traced` in execution
    order. All hooks are removed once the pass completes.

    Defects fixed: the hook method was renamed while `__call__` still
    referenced `self._forward_hook`; the field defaults pointed at the
    undefined `__UpperCAmelCase` instead of `list`; and `nn.Convad` /
    `nn.BatchNormad` do not exist — the intended classes are `nn.Conv2d`
    and `nn.BatchNorm2d`.
    """

    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs, outputs):
        # Treat as leaf: only member of .modules() is itself; Conv/BatchNorm
        # are included explicitly even when they wrap sub-parametrizations.
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        # Detach every hook so the wrapped model is left untouched.
        [handle.remove() for handle in self.handles]
        return self

    @property
    def parametrized(self):
        # Keep only traced modules that actually own parameters/buffers.
        return list(filter(lambda m: len(list(m.state_dict().keys())) > 0, self.traced))
@dataclass
class _A :
    """Transfers weights from `src` to `dest` by aligning their traced op lists.

    Both models are run on the same input; the parametrized modules observed
    in execution order are zipped pairwise and their state dicts copied over.

    Defects fixed: the skip-list field defaults pointed at the undefined
    `__UpperCAmelCase` instead of `list`, and the local assignment targets in
    `__call__` were lost (the body itself reads `dest_m`/`src_m` pairs and
    the traced lists).
    """

    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized
        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))
        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f'Numbers of operations are different. Source module has {len(src_traced)} operations while'
                f' destination module has {len(dest_traced)}.')
        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f'Transfered from={src_m} to={dest_m}')
class _A ( nn.Module ):
    """Minimal VISSL-style wrapper exposing a RegNet trunk's stage outputs.

    Defects fixed: the forward method had been renamed to `_lowerCamelCase`,
    which breaks nn.Module's `__call__` dispatch (callers invoke the model
    directly); locals in `__init__` had lost their targets (the loop reads
    `feature_blocks`); and `out_feat_keys` pointed at an undefined name —
    upstream passes None to get all stage outputs.
    """

    def __init__(self, model):
        super().__init__()
        feature_blocks = []
        # - get the stem
        feature_blocks.append(('''conv1''', model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith('''block'''), f'Unexpected layer name {k}'
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f'res{block_index}', v))
        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x):
        # Delegate to vissl's helper so outputs match the original model's.
        return get_trunk_forward_outputs(
            x, out_feat_keys=None, feature_blocks=self._feature_blocks, )
class _A ( dict ):
    """Maps a model name to a (model, state_dict) factory, falling back to timm.

    Unknown keys are translated to timm naming ("regnet-x-002" ->
    "regnetx_002") and loaded lazily as pretrained timm models.

    Defects fixed: the base class pointed at the undefined `__UpperCAmelCase`
    (`x not in self` and `super().__getitem__` require dict semantics); the
    helper method had been renamed while `__getitem__` still called
    `self.convert_name_to_timm`; and locals had lost their targets.
    """

    def convert_name_to_timm(self, x):
        x_split = x.split('''-''')
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x):
        if x not in self:
            x = self.convert_name_to_timm(x)
            # Lazy factory: the timm model is only instantiated when called.
            # NOTE(review): upstream loads with pretrained=True — confirm.
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)
        return val
class _A ( dict ):
    """Chooses the target transformers class for a given checkpoint name.

    Pure SEER checkpoints (no ImageNet-1k head) map to the bare RegNetModel;
    everything else maps to RegNetForImageClassification.

    Defects fixed: the base class pointed at the undefined `__UpperCAmelCase`
    (upstream subclasses dict) and the branch assignments had lost their
    `val` target, which the return statement reads.
    """

    def __getitem__(self, x):
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def __snake_case ( from_state_dict, to_state_dict, keys ):
    """Copy selected head tensors between state dicts.

    `keys` is a list of (from_key, to_key) pairs; each source tensor is
    cloned into `to_state_dict` under its new name, and the updated dict is
    returned. Defects fixed: the three parameters were all named
    `_UpperCAmelCase` (SyntaxError) and the dict-store assignment had lost
    its target.
    """
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f'Copied key={from_key} to={to_key}' )
    return to_state_dict
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = True , ):
    """Transfer weights from an original RegNet into a transformers model,
    verify the outputs match, and optionally push the result to the Hub.
    """
    # NOTE(review): the six parameters were all renamed to `_UpperCAmelCase`
    # (duplicate argument names are a SyntaxError — upstream names them
    # name, from_model_func, our_model_func, config, save_directory,
    # push_to_hub) and several `__a =` assignments below lost their distinct
    # targets (from_model/from_state_dict, our_model, module_transfer, x,
    # our_outputs, our_output, from_output, image_size, image_processor) —
    # restore them before running.
    print(f'Converting {name}...' )
    with torch.no_grad():
        __a , __a = from_model_func()
        __a = our_model_func(_UpperCAmelCase ).eval()
        __a = ModuleTransfer(src=_UpperCAmelCase , dest=_UpperCAmelCase , raise_if_mismatch=_UpperCAmelCase )
        # One dummy forward pass drives the tracker-based weight transfer.
        __a = torch.randn((1, 3, 224, 224) )
        module_transfer(_UpperCAmelCase )
    if from_state_dict is not None:
        __a = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            __a = [('''0.clf.0.weight''', '''classifier.1.weight'''), ('''0.clf.0.bias''', '''classifier.1.bias''')]
        __a = manually_copy_vissl_head(_UpperCAmelCase , our_model.state_dict() , _UpperCAmelCase )
        our_model.load_state_dict(_UpperCAmelCase )
    __a = our_model(_UpperCAmelCase , output_hidden_states=_UpperCAmelCase )
    __a = (
        our_outputs.logits if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else our_outputs.last_hidden_state
    )
    __a = from_model(_UpperCAmelCase )
    __a = from_output[-1] if type(_UpperCAmelCase ) is list else from_output
    # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        __a = our_outputs.hidden_states[-1]
    assert torch.allclose(_UpperCAmelCase , _UpperCAmelCase ), "The model logits don't match the original one."
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name , commit_message='''Add model''' , use_temp_dir=_UpperCAmelCase , )
        __a = 224 if '''seer''' not in name else 384
        # we can use the convnext one
        __a = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' , size=_UpperCAmelCase )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name , commit_message='''Add image processor''' , use_temp_dir=_UpperCAmelCase , )
    print(f'Pushed {name}' )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = True ):
__a = '''imagenet-1k-id2label.json'''
__a = 1000
__a = (1, num_labels)
__a = '''huggingface/label-files'''
__a = num_labels
__a = json.load(open(cached_download(hf_hub_url(_UpperCAmelCase , _UpperCAmelCase , repo_type='''dataset''' ) ) , '''r''' ) )
__a = {int(_UpperCAmelCase ): v for k, v in idalabel.items()}
__a = idalabel
__a = {v: k for k, v in idalabel.items()}
__a = partial(_UpperCAmelCase , num_labels=_UpperCAmelCase , idalabel=_UpperCAmelCase , labelaid=_UpperCAmelCase )
__a = {
'''regnet-x-002''': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type='''x''' ),
'''regnet-x-004''': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type='''x''' ),
'''regnet-x-006''': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type='''x''' ),
'''regnet-x-008''': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type='''x''' ),
'''regnet-x-016''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type='''x''' ),
'''regnet-x-032''': ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type='''x''' ),
'''regnet-x-040''': ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type='''x''' ),
'''regnet-x-064''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type='''x''' ),
'''regnet-x-080''': ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type='''x''' ),
'''regnet-x-120''': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type='''x''' ),
'''regnet-x-160''': ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type='''x''' ),
'''regnet-x-320''': ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type='''x''' ),
# y variant
'''regnet-y-002''': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
'''regnet-y-004''': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
'''regnet-y-006''': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
'''regnet-y-008''': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
'''regnet-y-016''': ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
'''regnet-y-032''': ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24 ),
'''regnet-y-040''': ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ),
'''regnet-y-064''': ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ),
'''regnet-y-080''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56 ),
'''regnet-y-120''': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ),
'''regnet-y-160''': ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ),
'''regnet-y-320''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'''regnet-y-320-seer''': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
'''regnet-y-640-seer''': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
'''regnet-y-1280-seer''': RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
'''regnet-y-2560-seer''': RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
'''regnet-y-10b-seer''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ),
# finetuned on imagenet
'''regnet-y-320-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
'''regnet-y-640-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
'''regnet-y-1280-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
'''regnet-y-2560-seer-in1k''': ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
'''regnet-y-10b-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ),
}
__a = NameToOurModelFuncMap()
__a = NameToFromModelFuncMap()
# add seer weights logic
def load_using_classy_vision(_UpperCAmelCase , _UpperCAmelCase ) -> Tuple[nn.Module, Dict]:
__a = torch.hub.load_state_dict_from_url(_UpperCAmelCase , model_dir=str(_UpperCAmelCase ) , map_location='''cpu''' )
__a = model_func()
# check if we have a head, if yes add it
__a = files['''classy_state_dict''']['''base_model''']['''model''']
__a = model_state_dict['''trunk''']
model.load_state_dict(_UpperCAmelCase )
return model.eval(), model_state_dict["heads"]
# pretrained
__a = partial(
_UpperCAmelCase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
__a = partial(
_UpperCAmelCase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
__a = partial(
_UpperCAmelCase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
__a = partial(
_UpperCAmelCase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch''' , lambda: FakeRegNetVisslWrapper(
RegNet(RegNetParams(depth=27 , group_width=1010 , w_a=1744 , w_a=6_20.83 , w_m=2.52 ) ) ) , )
# IN1K finetuned
__a = partial(
_UpperCAmelCase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
__a = partial(
_UpperCAmelCase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
__a = partial(
_UpperCAmelCase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
__a = partial(
_UpperCAmelCase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch''' , lambda: FakeRegNetVisslWrapper(
RegNet(RegNetParams(depth=27 , group_width=1010 , w_a=1744 , w_a=6_20.83 , w_m=2.52 ) ) ) , )
if model_name:
convert_weight_and_push(
_UpperCAmelCase , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , _UpperCAmelCase , _UpperCAmelCase , )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
_UpperCAmelCase , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , )
return config, expected_shape
if __name__ == "__main__":
    # Fixes: the parser/args were bound to the throwaway name `__snake_case`
    # but read back as `parser`/`args` (NameError); `--push_to_hub` used
    # `type=bool`, for which any non-empty string (including "False") parses
    # as True.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help=(
            "The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
            " currently: regnetx-*, regnety-*. If `None`, all of them will the converted."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=Path,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        default=True,
        # Parse common falsy strings explicitly; plain `type=bool` would turn
        # "--push_to_hub False" into True. The `True` default is unchanged.
        type=lambda v: str(v).lower() not in {"0", "false", "no"},
        required=False,
        help="If True, push model and image processor to the hub.",
    )
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 131 | 0 |
"""Byte-level (ByT5-style) tokenizer utilities — text is handled as raw UTF-8 bytes."""
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Module-level logger keyed to this module's import path.
lowerCamelCase : str = logging.get_logger(__name__)
class A__ ( A__ ):
    """ByT5-style byte-level tokenizer.

    Text maps to one token per UTF-8 byte, so the base vocabulary is 256 byte
    values plus a small set of special tokens and ``extra_ids`` sentinels.

    NOTE(review): the class inherits from a name identical to its own
    (``A__``) and most methods below share the single name ``A``, so later
    definitions shadow earlier ones. The identifiers were mechanically
    renamed (base class presumably ``PreTrainedTokenizer``); confirm the
    intended names before relying on this code.
    """

    # Model input keys this tokenizer produces.
    A__ = ['input_ids', 'attention_mask']

    def __init__( self : Any , _a : List[str]="</s>" , _a : Optional[int]="<unk>" , _a : Optional[Any]="<pad>" , _a : Optional[int]=125 , _a : Optional[Any]=None , **_a : Optional[Any] , ) -> None:
        """Set up the byte vocabulary, special tokens and extra-id sentinels.

        NOTE(review): every parameter is named ``_a`` — duplicate parameter
        names are a SyntaxError in Python, so this cannot be the original
        signature (eos/unk/pad tokens, ``extra_ids`` and
        ``additional_special_tokens`` were presumably intended).
        """
        if extra_ids > 0 and additional_special_tokens is None:
            # No user-supplied specials: synthesize <extra_id_i> sentinels.
            _SCREAMING_SNAKE_CASE =[f"<extra_id_{i}>" for i in range(_a )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            _SCREAMING_SNAKE_CASE =len(set(filter(lambda _a : bool('extra_id' in str(_a ) ) , _a ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    ' provided to ByT5Tokenizer. In this case the additional_special_tokens must include the'
                    ' extra_ids tokens' )
        # Wrap plain-string specials in AddedToken so whitespace is preserved.
        _SCREAMING_SNAKE_CASE =AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else pad_token
        _SCREAMING_SNAKE_CASE =AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else eos_token
        _SCREAMING_SNAKE_CASE =AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else unk_token
        super().__init__(
            eos_token=_a , unk_token=_a , pad_token=_a , extra_ids=_a , additional_special_tokens=_a , **_a , )
        _SCREAMING_SNAKE_CASE =extra_ids
        _SCREAMING_SNAKE_CASE =2**8  # utf is 8 bits
        # define special tokens dict
        _SCREAMING_SNAKE_CASE ={
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        _SCREAMING_SNAKE_CASE =len(self.special_tokens_encoder )
        _SCREAMING_SNAKE_CASE =len(_a )
        for i, token in enumerate(_a ):
            # Extra-id sentinels occupy the top of the id space.
            _SCREAMING_SNAKE_CASE =self.vocab_size + i - n
        # Reverse map: id -> special-token string.
        _SCREAMING_SNAKE_CASE ={v: k for k, v in self.special_tokens_encoder.items()}

    @property
    def A ( self : str ) -> int:
        """Total vocabulary size: 256 byte tokens + special tokens + extra ids."""
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def A ( self : str , _a : List[int] , _a : Optional[List[int]] = None , _a : bool = False ) -> List[int]:
        """Return a 0/1 mask with 1 at special-token positions (appended EOS)."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
        # normal case: some special tokens
        if token_ids_a is None:
            return ([0] * len(_a )) + [1]
        return ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1]

    def A ( self : str , _a : List[int] ) -> List[int]:
        """Append EOS unless the sequence already ends with it (then warn)."""
        if len(_a ) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                ' eos tokens being added.' )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def A ( self : Union[str, Any] , _a : List[int] , _a : Optional[List[int]] = None ) -> List[int]:
        """Token type ids: all zeros — this tokenizer does not use segments."""
        _SCREAMING_SNAKE_CASE =[self.eos_token_id]
        if token_ids_a is None:
            return len(token_ids_a + eos ) * [0]
        return len(token_ids_a + eos + token_ids_a + eos ) * [0]

    def A ( self : Optional[int] , _a : List[int] , _a : Optional[List[int]] = None ) -> List[int]:
        """Build model input: sequence + EOS (+ second sequence + EOS for pairs)."""
        _SCREAMING_SNAKE_CASE =self._add_eos_if_not_present(_a )
        if token_ids_a is None:
            return token_ids_a
        else:
            _SCREAMING_SNAKE_CASE =self._add_eos_if_not_present(_a )
            return token_ids_a + token_ids_a

    def A ( self : List[Any] , _a : str ) -> List[str]:
        """Tokenize into one single-character token per UTF-8 byte."""
        _SCREAMING_SNAKE_CASE =[chr(_a ) for i in text.encode('utf-8' )]
        return tokens

    def A ( self : List[Any] , _a : List[Any] ) -> List[Any]:
        """Map a token string to its id (specials, added tokens, then bytes)."""
        if token in self.special_tokens_encoder:
            _SCREAMING_SNAKE_CASE =self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            _SCREAMING_SNAKE_CASE =self.added_tokens_encoder[token]
        elif len(_a ) != 1:
            # Multi-character strings that are not registered tokens are unknown.
            _SCREAMING_SNAKE_CASE =self.unk_token_id
        else:
            # Single character: its byte value shifted past the special-token ids.
            _SCREAMING_SNAKE_CASE =ord(_a ) + self._num_special_tokens
        return token_id

    def A ( self : Tuple , _a : Optional[int] ) -> str:
        """Map an id back to its token string (inverse of the lookup above)."""
        if index in self.special_tokens_decoder:
            _SCREAMING_SNAKE_CASE =self.special_tokens_decoder[index]
        else:
            _SCREAMING_SNAKE_CASE =chr(index - self._num_special_tokens )
        return token

    def A ( self : int , _a : int ) -> Optional[int]:
        """Join byte tokens back into a string, decoding UTF-8 leniently."""
        _SCREAMING_SNAKE_CASE =b''
        for token in tokens:
            if token in self.special_tokens_decoder:
                _SCREAMING_SNAKE_CASE =self.special_tokens_decoder[token].encode('utf-8' )
            elif token in self.added_tokens_decoder:
                # NOTE(review): this branch indexes special_tokens_decoder with a
                # key known only to be in added_tokens_decoder — almost certainly
                # a copy/paste bug; confirm it should read added_tokens_decoder.
                _SCREAMING_SNAKE_CASE =self.special_tokens_decoder[token].encode('utf-8' )
            elif token in self.special_tokens_encoder:
                _SCREAMING_SNAKE_CASE =token.encode('utf-8' )
            elif token in self.added_tokens_encoder:
                _SCREAMING_SNAKE_CASE =token.encode('utf-8' )
            else:
                # Ordinary byte token: exactly one raw byte.
                _SCREAMING_SNAKE_CASE =bytes([ord(_a )] )
            bstring += tok_string
        _SCREAMING_SNAKE_CASE =bstring.decode('utf-8' , errors='ignore' )
        return string

    def A ( self : int , _a : str , _a : Optional[str] = None ) -> Tuple[str]:
        """Nothing to save: the byte vocabulary is implicit."""
        return ()
| 47 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def lowercase ( SCREAMING_SNAKE_CASE__ ):
    """Build a ``(getitem, key)`` operation tuple for the HashMap test scripts.

    Fixes: the original returned the undefined name ``k`` instead of its own
    parameter (NameError at call time), and its annotations referenced
    ``Optional``, which this module never imports.
    """
    return getitem, SCREAMING_SNAKE_CASE__
def lowercase ( key , value ):
    """Build a ``(setitem, key, value)`` operation tuple for the test scripts.

    Fixes: the original declared both parameters with the same name (a
    SyntaxError in Python) and returned the undefined names ``k``/``v``.
    """
    return setitem, key, value
def lowercase ( SCREAMING_SNAKE_CASE__ ):
    """Build a ``(delitem, key)`` operation tuple for the test scripts.

    Fix: the original returned the undefined name ``k`` instead of its own
    parameter.
    """
    return delitem, SCREAMING_SNAKE_CASE__
def lowercase ( target , operation , *args ):
    """Apply ``operation(target, *args)`` and report (result, exception).

    Exactly one element of the returned pair is non-None: ``(result, None)``
    on success, ``(None, exception)`` on failure.

    Fix: the original declared all three parameters with the same name — a
    SyntaxError in Python — and called an undefined ``fun``.
    """
    try:
        return operation(target , *args ), None
    except Exception as e:
        return None, e
# Operation scripts for the parametrized HashMap test below. Each entry is an
# (operation, *args) tuple produced by the small factory helpers above.
# NOTE(review): every constant is bound to the same name ``a__`` (each
# assignment clobbers the previous one) and the factories are referenced as
# ``_set``/``_del``/``_get``, which this module never defines — identifiers
# were mechanically renamed. The intended names are presumably _add_items,
# _overwrite_items, _delete_items, _access_absent_items, _add_with_resize_up
# and _add_with_resize_down (see the parametrize ids below).
a__ = (
    _set("""key_a""", """val_a"""),
    _set("""key_b""", """val_b"""),
)
# Overwrite the same key twice.
a__ = [
    _set("""key_a""", """val_a"""),
    _set("""key_a""", """val_b"""),
]
# Insert, delete everything, then insert and delete again.
a__ = [
    _set("""key_a""", """val_a"""),
    _set("""key_b""", """val_b"""),
    _del("""key_a"""),
    _del("""key_b"""),
    _set("""key_a""", """val_a"""),
    _del("""key_a"""),
]
# Read/delete keys that are absent at various points.
a__ = [
    _get("""key_a"""),
    _del("""key_a"""),
    _set("""key_a""", """val_a"""),
    _del("""key_a"""),
    _del("""key_a"""),
    _get("""key_a"""),
]
# Enough inserts to force the map to grow.
a__ = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]
# Grow, shrink by deleting everything, then insert once more.
a__ = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("""key_a""", """val_b"""),
]
@pytest.mark.parametrize(
    """operations""" , (
        pytest.param(_add_items , id="""add items""" ),
        pytest.param(_overwrite_items , id="""overwrite items""" ),
        pytest.param(_delete_items , id="""delete items""" ),
        pytest.param(_access_absent_items , id="""access absent items""" ),
        pytest.param(_add_with_resize_up , id="""add with resize up""" ),
        pytest.param(_add_with_resize_down , id="""add with resize down""" ),
    ) , )
def lowercase ( SCREAMING_SNAKE_CASE__ : str ) -> Tuple:
    """Replay each operation script against a HashMap and a builtin dict and
    check they agree on results, repr, iteration, length and items.

    NOTE(review): this function is mangled beyond runnability — the
    parametrize fixtures (``_add_items`` etc.) are undefined (the scripts
    above are all bound to ``a__``), ``Tuple`` is never imported, the targets
    ``_snake_case , _snake_case : Tuple = ...`` are invalid syntax (an
    annotation on a tuple target), the two ``_run_operation`` results are
    never bound under the names compared below (``my_res``/``py_res``), and
    ``my``/``py`` are undefined. Restore the original identifiers before use.
    """
    _snake_case : List[Any] = HashMap(initial_block_size=4 )
    _snake_case : int = {}
    for _, (fun, *args) in enumerate(SCREAMING_SNAKE_CASE__ ):
        _snake_case , _snake_case : Tuple = _run_operation(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ )
        _snake_case , _snake_case : int = _run_operation(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ )
        assert my_res == py_res
        assert str(SCREAMING_SNAKE_CASE__ ) == str(SCREAMING_SNAKE_CASE__ )
        assert set(SCREAMING_SNAKE_CASE__ ) == set(SCREAMING_SNAKE_CASE__ )
        assert len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ )
        assert set(my.items() ) == set(py.items() )
def lowercase ( ):
    """Verify HashMap exposes no public names beyond dict's public API.

    Fixes: the inner predicate ignored its parameter and read an undefined
    name ``name``; the two comprehension results were bound to a throwaway
    local while the final assertion compared undefined names; the return
    annotation referenced ``Optional``, which this module never imports.
    """
    def is_public(attr_name ):
        # A name is public when it has no leading underscore.
        return not attr_name.startswith("""_""" )

    dict_public_names = {name for name in dir({} ) if is_public(name )}
    hash_public_names = {name for name in dir(HashMap() ) if is_public(name )}
    # dict's public surface must be a strict superset of HashMap's.
    assert dict_public_names > hash_public_names
| 317 | 0 |
"""Lazy import structure for the Perceiver model (HF-style package __init__)."""
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)
# NOTE(review): every constant below is bound to the same name
# ``lowerCamelCase_`` (each assignment clobbers the previous), and the final
# _LazyModule call references ``_import_structure``, which this module never
# defines. The names were mechanically rewritten; the first dict is
# presumably the real ``_import_structure`` and the later lists should be
# inserted into it.
lowerCamelCase_ = {
    "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
    "tokenization_perceiver": ["PerceiverTokenizer"],
}
# Register vision-dependent classes only when the vision extras import cleanly.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase_ = ["PerceiverFeatureExtractor"]
    lowerCamelCase_ = ["PerceiverImageProcessor"]
# Register torch-dependent modeling classes only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase_ = [
        "PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PerceiverForImageClassificationConvProcessing",
        "PerceiverForImageClassificationFourier",
        "PerceiverForImageClassificationLearned",
        "PerceiverForMaskedLM",
        "PerceiverForMultimodalAutoencoding",
        "PerceiverForOpticalFlow",
        "PerceiverForSequenceClassification",
        "PerceiverLayer",
        "PerceiverModel",
        "PerceiverPreTrainedModel",
    ]
# Static imports for type checkers; at runtime the module is normally
# replaced by a lazy proxy in the ``else`` branch below.
if TYPE_CHECKING:
    from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
    from .tokenization_perceiver import PerceiverTokenizer
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_perceiver import PerceiverFeatureExtractor
        from .image_processing_perceiver import PerceiverImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_perceiver import (
            PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PerceiverForImageClassificationConvProcessing,
            PerceiverForImageClassificationFourier,
            PerceiverForImageClassificationLearned,
            PerceiverForMaskedLM,
            PerceiverForMultimodalAutoencoding,
            PerceiverForOpticalFlow,
            PerceiverForSequenceClassification,
            PerceiverLayer,
            PerceiverModel,
            PerceiverPreTrainedModel,
        )
else:
    import sys
    # NOTE(review): upstream assigns the lazy module to
    # ``sys.modules[__name__]``; binding it to a throwaway name (and the
    # trailing ``| 239`` residue below) looks like corruption left by an
    # automated rewrite — confirm before use.
    lowerCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 239
"""simple docstring"""
def __lowerCamelCase ( pointa , pointb ):
    """Return the squared Euclidean distance between two 2-D points.

    Fixes: the original declared both parameters with one shared name (a
    SyntaxError), subtracted each coordinate from itself (always 0), and its
    annotations referenced unimported ``typing`` names.
    """
    return (pointa[0] - pointb[0]) ** 2 + (pointa[1] - pointb[1]) ** 2
def __lowerCamelCase ( points , column=0 ):
    """Return *points* sorted by the coordinate at index ``column``.

    Fixes: the original declared both parameters with one shared name (a
    SyntaxError) and its sort key read the undefined names ``x``/``column``.
    The second parameter keeps the name ``column`` because the caller below
    passes it by keyword.
    """
    return sorted(points , key=lambda point : point[column] )
def __lowerCamelCase ( points , points_counts , min_dis=float('''inf''' ) ):
    """Brute-force O(n^2): smallest squared distance among the first
    ``points_counts`` points, no smaller than the seed ``min_dis``.

    Fixes: the original declared its parameters with one shared name (a
    SyntaxError) and delegated to ``euclidean_distance_sqr``, a name that is
    never defined in this module — the squared distance is now inlined.
    """
    for i in range(points_counts - 1 ):
        for j in range(i + 1 , points_counts ):
            current_dis = (points[i][0] - points[j][0]) ** 2 + (points[i][1] - points[j][1]) ** 2
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def __lowerCamelCase ( points , points_counts , min_dis=float('''inf''' ) ):
    """Smallest squared distance inside a y-sorted strip: each point is
    compared only with up to 6 predecessors (the classic strip property).

    Fixes: the original declared its parameters with one shared name (a
    SyntaxError), used the garbled bound ``a_`` for the inner range (the
    standard algorithm stops at ``i``), and called the undefined
    ``euclidean_distance_sqr`` — the squared distance is now inlined.
    """
    for i in range(min(6 , points_counts - 1 ) , points_counts ):
        for j in range(max(0 , i - 6 ) , i ):
            current_dis = (points[i][0] - points[j][0]) ** 2 + (points[i][1] - points[j][1]) ** 2
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def __lowerCamelCase ( points_sorted_on_x , points_sorted_on_y , points_counts ):
    """Return the smallest SQUARED distance among the first ``points_counts``
    points, via divide and conquer on the x-sorted list.

    Fixes: the original declared three parameters with one shared name (a
    SyntaxError) and delegated to module-level helpers that are never defined
    in this file (``closest_pair_of_points_sqr``, ``dis_between_closest_pair``,
    ``dis_between_closest_in_strip``), so every call failed. The helpers are
    now nested, making the function self-contained, and the strip filter
    compares squared quantities consistently. ``points_sorted_on_y`` is kept
    for interface compatibility; y-ordering is recomputed locally.
    """

    def _sq_dist(p , q ):
        # Squared Euclidean distance between two 2-D points.
        return (p[0] - q[0]) ** 2 + (p[1] - q[1]) ** 2

    def _brute(pts , count , best ):
        # O(n^2) scan used for tiny sub-problems.
        for i in range(count - 1 ):
            for j in range(i + 1 , count ):
                d = _sq_dist(pts[i] , pts[j] )
                if d < best:
                    best = d
        return best

    def _strip_scan(pts , count , best ):
        # With pts sorted by y, each point needs only a handful of
        # predecessor comparisons inside the strip.
        for i in range(min(6 , count - 1 ) , count ):
            for j in range(max(0 , i - 6 ) , i ):
                d = _sq_dist(pts[i] , pts[j] )
                if d < best:
                    best = d
        return best

    def _recurse(xs , count ):
        # base case
        if count <= 3:
            return _brute(xs , count , float('''inf''' ) )
        # recursion
        mid = count // 2
        best = min(_recurse(xs[:mid] , mid ) , _recurse(xs[mid:] , count - mid ) )
        # candidates near the dividing line (squared width vs squared best)
        strip = [p for p in xs if (p[0] - xs[mid][0] ) ** 2 < best]
        strip.sort(key=lambda p : p[1] )
        return _strip_scan(strip , len(strip ) , best )

    return _recurse(list(points_sorted_on_x[:points_counts] ) , points_counts )
def __lowerCamelCase ( points , points_counts ):
    """Return the distance (not squared) between the closest pair among the
    first ``points_counts`` entries of ``points``.

    Fixes: the original declared both parameters with one shared name (a
    SyntaxError) and delegated to module-level names never defined in this
    file (``column_based_sort``, ``closest_pair_of_points_sqr``), so it
    always raised. This version computes the answer directly with a simple
    O(n^2) scan — ample for the demo-sized inputs this script handles.
    """
    best = float('''inf''' )
    for i in range(points_counts - 1 ):
        for j in range(i + 1 , points_counts ):
            d = (points[i][0] - points[j][0]) ** 2 + (points[i][1] - points[j][1]) ** 2
            if d < best:
                best = d
    return best ** 0.5
if __name__ == "__main__":
    # Demo: the closest pair here is (2, 3)-(3, 4), at distance sqrt(2).
    # NOTE(review): the list is bound to ``lowerCamelCase_`` while the call
    # reads the undefined names ``points``/``closest_pair_of_points``, and
    # the trailing ``| 239 | 1 |`` is residue from an automated rewrite —
    # restore ``points = [...]`` and the real function name before running.
    lowerCamelCase_ = [(2, 3), (1_2, 3_0), (4_0, 5_0), (5, 1), (1_2, 1_0), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points))) | 239 | 1 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __lowerCAmelCase ( UpperCamelCase__ ):
    """Build a FocalNetConfig matching the given checkpoint name.

    Depths, focal levels/windows, embedding width and the conv-embed /
    post-layernorm / layerscale flags are all inferred from substrings of the
    model name; the ImageNet label maps are fetched from the Hub.

    Fixes: every local in the original was rebound to the single name
    ``__lowerCamelCase`` and the final config call passed the function
    parameter for every keyword (including the garbled ``idalabel`` /
    ``labelaid``), so the produced config was meaningless; the label file was
    also opened without ever being closed. Locals are restored and the JSON
    is read inside a context manager.
    """
    model_name = UpperCamelCase__  # readable alias; parameter name kept for compatibility
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    # Large/huge variants use conv patch embedding, post-layernorm, layerscale.
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        # "lrf" (large receptive field) variants add one focal level per stage.
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"
    with open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) as f:
        id2label = json.load(f )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim ,
        depths=depths ,
        focal_levels=focal_levels ,
        focal_windows=focal_windows ,
        use_conv_embed=use_conv_embed ,
        id2label=id2label ,
        label2id=label2id ,
        use_post_layernorm=use_post_layernorm ,
        use_layerscale=use_layerscale ,
    )
    return config
def __lowerCAmelCase ( UpperCamelCase__ ):
    """Translate one original FocalNet checkpoint key into the HF naming scheme.

    Fix: the original body read ``name``, which was never bound (the
    parameter survived an automated rename as ``UpperCamelCase__``), so every
    call raised ``NameError``. The mapping itself is unchanged.
    """
    name = UpperCamelCase__  # alias; parameter name kept for compatibility
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm" , "embeddings.norm" )
    if "layers" in name:
        # Original keys have no encoder prefix; add it before stage renaming.
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers" , "encoder.stages" )
    if "downsample.proj" in name:
        name = name.replace("downsample.proj" , "downsample.projection" )
    if "blocks" in name:
        name = name.replace("blocks" , "layers" )
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f" , "modulation.projection_in" )
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h" , "modulation.projection_context" )
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj" , "modulation.projection_out" )
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        # The classification head stays at top level.
        name = name.replace("head" , "classifier" )
    else:
        # Everything else lives under the backbone prefix.
        name = "focalnet." + name
    return name
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False ) -> Optional[Any]:
    """Download a FocalNet checkpoint, convert it to HF format, verify outputs
    on a COCO test image, then optionally save to disk and push to the Hub.

    NOTE(review): this function is left in a broken, mechanically-renamed
    state — all three parameters share one name (duplicate parameter names
    are a SyntaxError in Python), every local is rebound to
    ``__lowerCamelCase``, and the body reads ``model_name`` / ``state_dict``
    / ``config`` / ``model`` / ``processor`` / ``inputs`` /
    ``image_transforms`` / ``predicted_class_idx`` /
    ``pytorch_dump_folder_path`` / ``push_to_hub``, none of which are bound
    under those names. The key-rename loop also drops the mapping entirely
    (it pops each key and rebinds the value to a throwaway name). Restore the
    original identifiers before use.
    """
    # fmt: off
    __lowerCamelCase = {
        '''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
        '''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
        '''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
        '''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
        '''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
        '''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
        '''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
        '''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
        '''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
        '''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
    }
    # fmt: on
    __lowerCamelCase = model_name_to_url[model_name]
    print('''Checkpoint URL: ''' , UpperCamelCase__ )
    __lowerCamelCase = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location='''cpu''' )['''model''']
    # rename keys
    for key in state_dict.copy().keys():
        __lowerCamelCase = state_dict.pop(UpperCamelCase__ )
        __lowerCamelCase = val
    __lowerCamelCase = get_focalnet_config(UpperCamelCase__ )
    __lowerCamelCase = FocalNetForImageClassification(UpperCamelCase__ )
    model.eval()
    # load state dict
    model.load_state_dict(UpperCamelCase__ )
    # verify conversion
    __lowerCamelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    __lowerCamelCase = BitImageProcessor(
        do_resize=UpperCamelCase__ , size={'''shortest_edge''': 2_56} , resample=PILImageResampling.BILINEAR , do_center_crop=UpperCamelCase__ , crop_size=2_24 , do_normalize=UpperCamelCase__ , image_mean=UpperCamelCase__ , image_std=UpperCamelCase__ , )
    __lowerCamelCase = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw )
    __lowerCamelCase = processor(images=UpperCamelCase__ , return_tensors='''pt''' )
    __lowerCamelCase = transforms.Compose(
        [
            transforms.Resize(2_56 ),
            transforms.CenterCrop(2_24 ),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
        ] )
    __lowerCamelCase = image_transforms(UpperCamelCase__ ).unsqueeze(0 )
    # verify pixel_values
    # NOTE(review): this compares the processor output against the (garbled)
    # function parameter — upstream compares it to the torchvision-transformed
    # pixel values computed just above.
    assert torch.allclose(inputs.pixel_values , UpperCamelCase__ , atol=1E-4 )
    __lowerCamelCase = model(**UpperCamelCase__ )
    __lowerCamelCase = outputs.logits.argmax(-1 ).item()
    print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
    print('''First values of logits:''' , outputs.logits[0, :3] )
    # Reference logit slices per model name (used to sanity-check conversion).
    if model_name == "focalnet-tiny":
        __lowerCamelCase = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] )
    elif model_name == "focalnet-tiny-lrf":
        __lowerCamelCase = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] )
    elif model_name == "focalnet-small":
        __lowerCamelCase = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] )
    elif model_name == "focalnet-small-lrf":
        __lowerCamelCase = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] )
    elif model_name == "focalnet-base":
        __lowerCamelCase = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] )
    elif model_name == "focalnet-base-lrf":
        __lowerCamelCase = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] )
    assert torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1E-4 )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(f"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(UpperCamelCase__ )
        processor.save_pretrained(UpperCamelCase__ )
    if push_to_hub:
        print(f"""Pushing model and processor of {model_name} to the hub...""" )
        model.push_to_hub(f"""{model_name}""" )
        processor.push_to_hub(f"""{model_name}""" )
if __name__ == "__main__":
    # NOTE(review): the parser is bound to ``__UpperCAmelCase`` but used below
    # as ``parser`` (and the parsed result as ``args``) — undefined names left
    # by an automated rename; restore before running.
    __UpperCAmelCase =argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="focalnet-tiny",
        type=str,
        help="Name of the FocalNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub.",
    )
    __UpperCAmelCase =parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 67 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
# Chat prompt template; ``<<task>>`` is substituted with the task at runtime.
# NOTE(review): all three constants below are bound to the same name ``_A``
# (each assignment clobbers the previous) while the function that follows
# reads ``DEFAULT_PROMPTS_REPO``/``PROMPT_FILES`` — identifiers were
# mechanically renamed; restore the original names.
_A : Optional[int] = """
Human: <<task>>
Assistant: """
# Default Hub dataset hosting the prompt templates.
_A : List[Any] = """huggingface-tools/default-prompts"""
# Mode -> template filename inside that dataset.
_A : Optional[int] = {"""chat""": """chat_prompt_template.txt""", """run""": """run_prompt_template.txt"""}
def __magic_name__ ( prompt_or_repo_id , agent_name , mode="run" ):
    """Resolve a prompt: return *prompt_or_repo_id* unchanged when it is an
    inline prompt (contains whitespace), otherwise treat it as a Hub dataset
    repo id and fetch the template for *mode* ("run" or "chat").

    Fixes: the original declared all three parameters with one shared name —
    a SyntaxError in Python — so none of the body references resolved.
    NOTE(review): ``DEFAULT_PROMPTS_REPO``/``PROMPT_FILES`` are read from
    module scope but the constants above are bound to ``_A`` — confirm the
    constant names are restored alongside this fix.
    """
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO
    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s" , prompt_or_repo_id ) is not None:
        return prompt_or_repo_id
    prompt_file = cached_file(
        prompt_or_repo_id , PROMPT_FILES[mode] , repo_type="dataset" , user_agent={"agent": agent_name} )
    with open(prompt_file , "r" , encoding="utf-8" ) as f:
        return f.read()
| 202 | 0 |
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
# Show conversion progress at INFO level.
# NOTE(review): the "GPTa"/"gpta" symbols look like a mechanical rename of
# the GPT2 names — confirm against the installed transformers version.
logging.set_verbosity_info()
def snake_case_ ( gpta_checkpoint_path , gpta_config_file , pytorch_dump_folder_path ) -> None:
    """Convert a TensorFlow GPT-2 checkpoint into a PyTorch model + config.

    Fixes: the original declared all three parameters with one shared name —
    a SyntaxError in Python — and the ``torch.save`` target was garbled to
    that same name instead of the weights dump path.
    """
    # Construct model
    if gpta_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpta_config_file )
    model = GPTaModel(config )
    # Load weights from numpy
    load_tf_weights_in_gpta(model , config , gpta_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
    print(f'Save PyTorch model to {pytorch_weights_dump_path}' )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(f'Save configuration file to {pytorch_config_dump_path}' )
    with open(pytorch_config_dump_path , 'w' , encoding='utf-8' ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    # NOTE(review): the parser is bound to ``__lowerCAmelCase`` but used below
    # as ``parser`` (and the result as ``args``), and the final call targets
    # ``convert_gpta_checkpoint_to_pytorch`` although this module defines the
    # converter as ``snake_case_`` — undefined names left by an automated
    # rename; restore before running.
    __lowerCAmelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--gpt2_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    parser.add_argument(
        '''--gpt2_config_file''',
        default='''''',
        type=str,
        help=(
            '''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    __lowerCAmelCase = parser.parse_args()
    convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
| 288 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __a ( __UpperCamelCase , unittest.TestCase ):
__lowercase : Optional[int] = CLIPTokenizer
__lowercase : str = CLIPTokenizerFast
__lowercase : Tuple = True
__lowercase : str = {}
__lowercase : Dict = False
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
'''simple docstring'''
super().setUp()
# fmt: off
lowercase__: str = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
lowercase__: List[str] = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
lowercase__: int = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
lowercase__: Optional[int] = {'unk_token': '<unk>'}
lowercase__: int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowercase__: Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(lowerCAmelCase__ ) )
def SCREAMING_SNAKE_CASE__ ( self , **lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self , **lowerCAmelCase__ ) -> Optional[int]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> Union[str, Any]:
'''simple docstring'''
lowercase__: Dict = 'lower newer'
lowercase__: Dict = 'lower newer'
return input_text, output_text
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase__: Union[str, Any] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowercase__: Dict = 'lower newer'
lowercase__: Union[str, Any] = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
lowercase__: Any = tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase__: Tuple = tokens + [tokenizer.unk_token]
lowercase__: Tuple = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , lowerCAmelCase__ )
    @require_ftfy
    def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
        """Slow (ftfy) and fast tokenizers must tokenize tricky unicode alike.

        NOTE(review): every ``from_pretrained``/``tokenize``/assertion call
        below passes ``lowerCAmelCase__``, a name never bound in this method —
        the loop variables (``pretrained_name``, ``kwargs``) and the per-case
        strings were clearly intended; restore them before running.
        """
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                lowercase__: List[str] = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
                lowercase__: Optional[int] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
                lowercase__: Optional[int] = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
                lowercase__: Optional[Any] = tokenizer_s.tokenize(lowerCAmelCase__ )
                lowercase__: Dict = tokenizer_r.tokenize(lowerCAmelCase__ )
                self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                lowercase__: Dict = 'xa\u0303y' + ' ' + 'x\xe3y'
                lowercase__: Tuple = tokenizer_s.tokenize(lowerCAmelCase__ )
                lowercase__: int = tokenizer_r.tokenize(lowerCAmelCase__ )
                self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
                # Test that the tokenization is identical on unicode of space type
                lowercase__: str = [
                    '\u0009', # (horizontal tab, '\t')
                    '\u000B', # (vertical tab)
                    '\u000C', # (form feed)
                    '\u0020', # (space, ' ')
                    '\u200E', # (left-to-right mark):w
                    '\u200F', # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    lowercase__: Optional[Any] = tokenizer_s.tokenize(lowerCAmelCase__ )
                    lowercase__: Tuple = tokenizer_r.tokenize(lowerCAmelCase__ )
                    self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
                # Test that the tokenization is identical on unicode of line break type
                lowercase__: str = [
                    '\u000A', # (line feed, '\n')
                    '\r\n', # (carriage return and line feed, '\r\n')
                    '\u000D', # (carriage return, '\r')
                    '\r', # (carriage return, '\r')
                    '\u000D', # (carriage return, '\r')
                    '\u2028', # (line separator)
                    '\u2029', # (paragraph separator)
                    # "\u0085", # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    lowercase__: Optional[int] = tokenizer_s.tokenize(lowerCAmelCase__ )
                    lowercase__: Optional[int] = tokenizer_r.tokenize(lowerCAmelCase__ )
                    self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
'''simple docstring'''
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowercase__: Optional[int] = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
lowercase__: Optional[int] = F'{text_of_1_token} {text_of_1_token}'
lowercase__: int = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , )
lowercase__: Dict = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCAmelCase__ ) + 1, len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
lowercase__: Any = F' {text}'
lowercase__: Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , )
lowercase__: int = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCAmelCase__ ) + 1, 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
'''simple docstring'''
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(lowerCAmelCase__ ) as context:
self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer' )
self.assertTrue(
context.exception.args[0].startswith(
'The `backend_tokenizer` provided does not match the expected format.' ) )
@require_ftfy
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
super().test_tokenization_python_rust_equals()
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
# CLIP always lower cases letters
pass
# ----
'''simple docstring'''
# Size of the character alphabet used by the rolling hash.
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    """
    Rabin-Karp substring search: return True iff `pattern` occurs in `text`.

    Uses a rolling polynomial hash (base `alphabet_size`, mod `modulus`) so that
    each window hash is updated in O(1); candidate windows are confirmed with a
    direct string comparison to rule out hash collisions.
    """
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        # modulus_power ends up as alphabet_size**(p_len - 1) % modulus,
        # the weight of the leading character in the window hash.
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
def test_rabin_karp() -> None:
    """Exercise rabin_karp on matching and non-matching pattern/text pairs."""
    # Test 1)
    pattern = "abc1abc12"
    text_a = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text_b = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text_a) and not rabin_karp(pattern, text_b)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5) non-ASCII input
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
# ----
from __future__ import annotations
import queue
class TreeNode:
    """Node of a binary tree: an integer payload plus optional left/right children."""

    def __init__(self, data: int) -> None:
        self.data = data
        self.left: TreeNode | None = None
        self.right: TreeNode | None = None
def build_tree() -> TreeNode:
    """Interactively build a binary tree level by level from user input.

    Reads integer node values from stdin; entering "n" (or nothing) stops input
    and returns the root of the tree built so far.
    """
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    # Unreachable in practice: input always terminates via one of the returns.
    raise
def pre_order(node: TreeNode) -> None:
    """Print a pre-order (root, left, right) traversal, comma separated."""
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)
def in_order(node: TreeNode) -> None:
    """Print an in-order (left, root, right) traversal, comma separated."""
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)
def post_order(node: TreeNode) -> None:
    """Print a post-order (left, right, root) traversal, comma separated."""
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")
def level_order(node: TreeNode) -> None:
    """Print a breadth-first (level-order) traversal, comma separated."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)
def level_order_actual(node: TreeNode) -> None:
    """Print a level-order traversal with one output line per tree level."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        next_level = []
        while not q.empty():  # drain the current level
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                next_level.append(node_dequeued.left)
            if node_dequeued.right:
                next_level.append(node_dequeued.right)
        print()
        for child in next_level:
            q.put(child)
def pre_order_iter(node: TreeNode) -> None:
    """Iterative pre-order traversal using an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right
def in_order_iter(node: TreeNode) -> None:
    """Iterative in-order traversal using an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list = []
    n = node
    while n or stack:
        while n:  # push the whole left spine
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right
def post_order_iter(node: TreeNode) -> None:
    """Iterative post-order traversal using two stacks."""
    if not isinstance(node, TreeNode) or not node:
        return
    stacka, stackb = [], []
    n = node
    stacka.append(n)
    while stacka:  # to find the reversed order of post order, store it in stack2
        n = stacka.pop()
        if n.left:
            stacka.append(n.left)
        if n.right:
            stacka.append(n.right)
        stackb.append(n)
    while stackb:  # pop up from stack2 will be the post order
        print(stackb.pop().data, end=",")
def prompt(s: str = "", width: int = 50, char: str = "*") -> str:
    """Return `s` centered in a `width`-wide banner of `char`.

    With an empty string, return a newline followed by a full-width rule.
    """
    if not s:
        return "\n" + width * char
    # Split the remaining width (minus the two padding spaces) around the text;
    # any odd leftover goes to the right side.
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Interactive demo: build a tree from stdin, then run every traversal on it.
    print(prompt('Binary Tree Traversals'))
    # NOTE(review): the tree root is bound to `SCREAMING_SNAKE_CASE_` but every call
    # below reads `node` — these names look mechanically renamed; confirm upstream.
    SCREAMING_SNAKE_CASE_: TreeNode =build_tree()
    print(prompt('Pre Order Traversal'))
    pre_order(node)
    print(prompt() + '\n')
    print(prompt('In Order Traversal'))
    in_order(node)
    print(prompt() + '\n')
    print(prompt('Post Order Traversal'))
    post_order(node)
    print(prompt() + '\n')
    print(prompt('Level Order Traversal'))
    level_order(node)
    print(prompt() + '\n')
    print(prompt('Actual Level Order Traversal'))
    level_order_actual(node)
    print('*' * 50 + '\n')
    print(prompt('Pre Order Traversal - Iteration Version'))
    pre_order_iter(node)
    print(prompt() + '\n')
    print(prompt('In Order Traversal - Iteration Version'))
    in_order_iter(node)
    print(prompt() + '\n')
    print(prompt('Post Order Traversal - Iteration Version'))
    post_order_iter(node)
    print(prompt())
# ----
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
# Module-level logger (currently unused by the pipeline class below).
lowerCAmelCase__ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class a_(Pipeline):
    """Zero-shot audio classification pipeline.

    Scores a single-channel audio clip against a caller-supplied list of
    candidate text labels using an audio-text model (CLAP-style); PyTorch only.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.framework != "pt":
            raise ValueError(F"""The {self.__class__} is only available in PyTorch.""")
        # No specific FOR_XXX available yet

    def __call__(self, audios, **kwargs):
        """Classify the audio(s) passed as inputs against `candidate_labels`.

        `audios` may be a URL, a local file path, raw bytes, or a 1-D numpy array.
        """
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        # Route the recognised kwargs to the preprocess step; forward and
        # postprocess take no extra parameters.
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs['candidate_labels']
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs['hypothesis_template']
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        """Load/decode the audio, extract features, and tokenize one hypothesis per label."""
        if isinstance(audio, str):
            if audio.startswith('http://') or audio.startswith('https://'):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, 'rb') as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError('We expect a numpy ndarray as input')
        if len(audio.shape) != 1:
            raise ValueError('We expect a single channel audio input for ZeroShotAudioClassificationPipeline')

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors='pt')
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        """Run the model on the audio features plus the tokenized label hypotheses."""
        candidate_labels = model_inputs.pop('candidate_labels')
        text_inputs = model_inputs.pop('text_inputs')
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            'candidate_labels': candidate_labels,
            'logits': outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        """Convert logits to per-label probabilities, sorted by descending score."""
        candidate_labels = model_outputs.pop('candidate_labels')
        logits = model_outputs['logits'][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError('`tf` framework not supported.')

        result = [
            {'score': score, 'label': candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
from math import log

from scipy.constants import Boltzmann, physical_constants

T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,  # donor concentration (cm^-3)
    acceptor_conc: float,  # acceptor concentration (cm^-3)
    intrinsic_conc: float,  # intrinsic concentration (cm^-3)
) -> float:
    """Calculate the built-in voltage of a pn-junction diode at temperature T.

    V_bi = (kT/q) * ln(Nd * Na / ni^2); raises ValueError for non-positive
    concentrations or when a doping concentration does not exceed the
    intrinsic concentration.
    """
    if donor_conc <= 0:
        raise ValueError('Donor concentration should be positive')
    elif acceptor_conc <= 0:
        raise ValueError('Acceptor concentration should be positive')
    elif intrinsic_conc <= 0:
        raise ValueError('Intrinsic concentration should be positive')
    elif donor_conc <= intrinsic_conc:
        raise ValueError(
            'Donor concentration should be greater than intrinsic concentration')
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError(
            'Acceptor concentration should be greater than intrinsic concentration')
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list:
    """Return all primes strictly below `max_number` via a Sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            # Multiples of i starting at i*i are composite.
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800800, degree: int = 800800) -> int:
    """Project Euler 800: count hybrid integers p^q * q^p (p < q prime) <= base^degree.

    p^q * q^p <= base^degree  <=>  q*log2(p) + p*log2(q) <= degree*log2(base),
    counted with a two-pointer sweep over the sorted primes.
    """
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        # Shrink the right pointer until the pair (left, right) qualifies;
        # every prime between them then also pairs with prime_numbers[left].
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count


if __name__ == "__main__":
    print(f"{solution() = }")
from PIL import Image
def mean_threshold(image):
    """Binarize a grayscale PIL image in place around its mean pixel value.

    Pixels strictly above the integer mean become 255, all others 0; the
    (mutated) image is also returned.
    """
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(height):
        for j in range(width):
            pixel = pixels[j, i]
            mean += pixel
    # Integer mean over all pixels.
    mean //= width * height

    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
if __name__ == "__main__":
    # Load a grayscale ("L") image, threshold it around its mean, and save it.
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
import math
def prime_sieve(n: int) -> list:
    """Return all primes below n (n >= 3) using an odd-only Sieve of Eratosthenes."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(limit: int = 999_966_663_333) -> int:
    """Project Euler 234: sum of semidivisible numbers not exceeding `limit`.

    For n between consecutive prime squares p^2 < n < q^2, n is semidivisible
    iff exactly one of p (lps) and q (ups) divides it; numbers divisible by
    both are added twice below and therefore removed twice.
    """
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum


if __name__ == "__main__":
    print(solution())
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class __magic_name__ ( unittest.TestCase ):
    def UpperCAmelCase_(self) -> Tuple:
        """Downloading a Flax pipeline must not fetch any PyTorch `.bin` weight files."""
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            pipeline = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )
            # Collect every downloaded file name under the snapshot directory.
            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)
@slow
@require_flax
class __magic_name__ ( unittest.TestCase ):
    """Slow, multi-device Flax Stable Diffusion integration tests.

    NOTE(review): the sentinel `_lowercase` appears where concrete arguments
    (revision strings, booleans, prompt/rng/step variables) are expected, and
    results are bound to `UpperCamelCase_` but read under other names — this
    looks like a mechanical rename; confirm against the upstream diffusers
    test file before relying on these bodies.
    """

    # fp32 run on the tiny test checkpoint at 64x64, 4 inference steps.
    def UpperCAmelCase_ ( self )-> Dict:
        UpperCamelCase_ , UpperCamelCase_ = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=_lowercase )
        UpperCamelCase_ = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        UpperCamelCase_ = jax.random.PRNGKey(0 )
        UpperCamelCase_ = 4
        UpperCamelCase_ = jax.device_count()
        UpperCamelCase_ = num_samples * [prompt]
        UpperCamelCase_ = pipeline.prepare_inputs(_lowercase )
        # shard inputs and rng
        UpperCamelCase_ = replicate(_lowercase )
        UpperCamelCase_ = jax.random.split(_lowercase , _lowercase )
        UpperCamelCase_ = shard(_lowercase )
        UpperCamelCase_ = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_514_745 ) < 1e-3
            assert np.abs(np.abs(_lowercase , dtype=np.floataa ).sum() - 49_947.875 ) < 5e-1
        UpperCamelCase_ = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
        assert len(_lowercase ) == num_samples

    # fp32 run on the real CompVis checkpoint ("flax" revision), 50 steps, 512x512.
    def UpperCAmelCase_ ( self )-> Union[str, Any]:
        UpperCamelCase_ , UpperCamelCase_ = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="flax" , safety_checker=_lowercase )
        UpperCamelCase_ = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        UpperCamelCase_ = jax.random.PRNGKey(0 )
        UpperCamelCase_ = 50
        UpperCamelCase_ = jax.device_count()
        UpperCamelCase_ = num_samples * [prompt]
        UpperCamelCase_ = pipeline.prepare_inputs(_lowercase )
        # shard inputs and rng
        UpperCamelCase_ = replicate(_lowercase )
        UpperCamelCase_ = jax.random.split(_lowercase , _lowercase )
        UpperCamelCase_ = shard(_lowercase )
        UpperCamelCase_ = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05_652_401) ) < 1e-3
            assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_383_808.2) ) < 5e-1

    # bfloat16 run with an explicit safety_checker argument.
    def UpperCAmelCase_ ( self )-> List[str]:
        UpperCamelCase_ , UpperCamelCase_ = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=_lowercase )
        UpperCamelCase_ = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        UpperCamelCase_ = jax.random.PRNGKey(0 )
        UpperCamelCase_ = 50
        UpperCamelCase_ = jax.device_count()
        UpperCamelCase_ = num_samples * [prompt]
        UpperCamelCase_ = pipeline.prepare_inputs(_lowercase )
        # shard inputs and rng
        UpperCamelCase_ = replicate(_lowercase )
        UpperCamelCase_ = jax.random.split(_lowercase , _lowercase )
        UpperCamelCase_ = shard(_lowercase )
        UpperCamelCase_ = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3
            assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_373_516.75) ) < 5e-1

    # bfloat16 run with the default safety checker.
    def UpperCAmelCase_ ( self )-> List[Any]:
        UpperCamelCase_ , UpperCamelCase_ = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa )
        UpperCamelCase_ = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        UpperCamelCase_ = jax.random.PRNGKey(0 )
        UpperCamelCase_ = 50
        UpperCamelCase_ = jax.device_count()
        UpperCamelCase_ = num_samples * [prompt]
        UpperCamelCase_ = pipeline.prepare_inputs(_lowercase )
        # shard inputs and rng
        UpperCamelCase_ = replicate(_lowercase )
        UpperCamelCase_ = jax.random.split(_lowercase , _lowercase )
        UpperCamelCase_ = shard(_lowercase )
        UpperCamelCase_ = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3
            assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_373_516.75) ) < 5e-1

    # bfloat16 run with a custom DDIM scheduler and its created state.
    def UpperCAmelCase_ ( self )-> Any:
        UpperCamelCase_ = FlaxDDIMScheduler(
            beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , set_alpha_to_one=_lowercase , steps_offset=1 , )
        UpperCamelCase_ , UpperCamelCase_ = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , scheduler=_lowercase , safety_checker=_lowercase , )
        UpperCamelCase_ = scheduler.create_state()
        UpperCamelCase_ = scheduler_state
        UpperCamelCase_ = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        UpperCamelCase_ = jax.random.PRNGKey(0 )
        UpperCamelCase_ = 50
        UpperCamelCase_ = jax.device_count()
        UpperCamelCase_ = num_samples * [prompt]
        UpperCamelCase_ = pipeline.prepare_inputs(_lowercase )
        # shard inputs and rng
        UpperCamelCase_ = replicate(_lowercase )
        UpperCamelCase_ = jax.random.split(_lowercase , _lowercase )
        UpperCamelCase_ = shard(_lowercase )
        UpperCamelCase_ = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.045_043_945) ) < 1e-3
            assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_347_693.5) ) < 5e-1

    # Compare standard attention against memory-efficient attention on one slice.
    def UpperCAmelCase_ ( self )-> Dict:
        UpperCamelCase_ = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        UpperCamelCase_ = jax.device_count()
        UpperCamelCase_ = num_samples * [prompt]
        UpperCamelCase_ = jax.random.split(jax.random.PRNGKey(0 ) , _lowercase )
        UpperCamelCase_ , UpperCamelCase_ = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=_lowercase , )
        UpperCamelCase_ = replicate(_lowercase )
        UpperCamelCase_ = pipeline.prepare_inputs(_lowercase )
        UpperCamelCase_ = shard(_lowercase )
        UpperCamelCase_ = pipeline(_lowercase , _lowercase , _lowercase , jit=_lowercase ).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        UpperCamelCase_ = images[2, 0, 256, 10:17, 1]
        # With memory efficient attention
        UpperCamelCase_ , UpperCamelCase_ = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=_lowercase , use_memory_efficient_attention=_lowercase , )
        UpperCamelCase_ = replicate(_lowercase )
        UpperCamelCase_ = pipeline.prepare_inputs(_lowercase )
        UpperCamelCase_ = shard(_lowercase )
        UpperCamelCase_ = pipeline(_lowercase , _lowercase , _lowercase , jit=_lowercase ).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        UpperCamelCase_ = images[2, 0, 256, 10:17, 1]
        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice ).max() < 1e-2
# ----
"""Download the image advertised by a web page's Open Graph ``og:image`` meta tag."""
from datetime import datetime

import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input("""Enter image url: """).strip()
    print(f"""Downloading image from {url} ...""")
    soup = BeautifulSoup(requests.get(url).content, """html.parser""")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("""meta""", {"""property""": """og:image"""})["""content"""]
    image_data = requests.get(image_url).content
    # Timestamped output name so repeated downloads do not overwrite each other.
    file_name = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
    with open(file_name, """wb""") as fp:
        fp.write(image_data)
    print(f"""Done. Image saved to disk as {file_name}.""")
# ----
'''simple docstring'''
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Map of checkpoint name -> hosted config URL.
EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """snap-research/efficientformer-l1-300""": (
        """https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"""
    ),
}
class __UpperCamelCase(PretrainedConfig):
    r"""
    Configuration class for EfficientFormer models: stores the architecture
    hyper-parameters used to instantiate a model. Defaults mirror the
    snap-research/efficientformer-l1-300 checkpoint. Extra keyword arguments
    are forwarded to `PretrainedConfig`.
    """

    lowercase: str = 'efficientformer'  # model_type identifier used by AutoConfig

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
# ----
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class _lowerCamelCase(TaskTemplate):
    """Task template for audio classification datasets: one Audio input column
    and one ClassLabel target column."""

    # `task` is kept in asdict() output even when left at its default.
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        """Return a copy of this template whose label schema uses the ClassLabel found in `features`."""
        if self.label_column not in features:
            raise ValueError(f"""Column {self.label_column} is not present in features.""")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"""Column {self.label_column} is not a ClassLabel.""")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # Frozen dataclass: write through __dict__ to bypass the frozen __setattr__.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Mapping from dataset column names to the template's canonical names."""
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
# ----
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

# File names the tokenizer saves/loads; referenced by the tokenizer class below.
VOCAB_FILES_NAMES = {
    """vocab_file""": """vocab.json""",
    """merges_file""": """merges.txt""",
    """tokenizer_config_file""": """tokenizer_config.json""",
}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""},
    """merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""},
    """tokenizer_config_file""": {
        """facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"""
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"""facebook/blenderbot-3B""": 1_2_8}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """
    Return a mapping from every byte value (0-255) to a printable unicode character.

    Printable bytes map to themselves; the remaining bytes are assigned code
    points starting at 256 so the BPE vocabulary avoids whitespace/control
    characters.
    """
    bs = (
        list(range(ord("""!"""), ord("""~""") + 1)) + list(range(ord("""¡"""), ord("""¬""") + 1)) + list(range(ord("""®"""), ord("""ÿ""") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """
    Return the set of adjacent symbol pairs in `word`.

    `word` is a sequence of symbols (variable-length strings), e.g. a tuple
    produced during BPE merging; a plain string also works (per-character).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class _lowerCamelCase(PreTrainedTokenizer):
    """Blenderbot tokenizer: a byte-level BPE tokenizer derived from GPT-2/RoBERTa.

    Args:
        vocab_file: Path to the JSON vocabulary (token -> id).
        merges_file: Path to the BPE merges file (one merge per line, header first).
        errors: How to handle decode errors when mapping bytes back to text.
        bos_token/eos_token/sep_token/cls_token/unk_token/pad_token/mask_token:
            Special token strings (or AddedToken instances).
        add_prefix_space: Whether to prepend a space to the input so the first
            word is tokenized like any other word.

    NOTE(review): the previous version was machine-scrambled — the base class
    (`a_`) and almost every local name it referenced were undefined, and all
    methods collided on one name. Restored to the canonical implementation.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        # Wrap bare strings as AddedToken so stripping behavior is explicit.
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        # Merge rank: lower index == applied earlier.
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self) -> int:
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder)

    def get_vocab(self) -> dict:
        """Return the full vocabulary (base vocab merged with added tokens)."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        """Apply byte-pair merges to ``token`` and return the space-joined result."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Greedily merge the lowest-ranked (earliest learned) pair.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string into BPE sub-tokens."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Convert a token (str) to an id, falling back to the unk token's id."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Convert an id to its token string."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Convert a sequence of BPE tokens back into a single decoded string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the vocabulary JSON and merges file into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            # Merges must be written in rank order; warn if ranks have gaps.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return all-zero token type ids (Blenderbot does not use segment ids)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        """Optionally prepend a space so the first word is treated like the rest."""
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """Append EOS; Blenderbot ignores the second sequence by design."""
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Flatten a Conversation into input ids, trimming from the left if too long."""
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
| 212 | 0 |
def _snake_case ( lowerCAmelCase : int ):
"""simple docstring"""
return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a_(TokenizerTesterMixin, unittest.TestCase):
    """Tests for the slow (Python) and fast (Rust) RoBERTa tokenizers.

    NOTE(review): the previous version was machine-scrambled — the mixin base
    class was an undefined name, every method collided on `lowercase__`, and
    local variables/boolean arguments were destroyed. Restored to the
    canonical transformers test implementation.
    """

    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"cls_token": "<s>"}

    def setUp(self):
        """Write a tiny vocabulary + merges file into the temp dir."""
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        """Load the slow tokenizer from the fixture files."""
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        """Load the fast tokenizer from the fixture files."""
        kwargs.update(self.special_tokens_map)
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """Round-trip text pair used by the common tokenizer tests."""
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        """Tokenization and id conversion against the fixture vocabulary."""
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def roberta_dict_integration_testing(self):
        """Integration check against known roberta-base ids.

        NOTE(review): no `test_` prefix, so pytest does not collect this —
        kept as in the upstream file.
        """
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=False), [0, 31_414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=False),
            [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2],
        )

    @slow
    def test_sequence_builders(self):
        """build_inputs_with_special_tokens must match encode with specials."""
        tokenizer = self.tokenizer_class.from_pretrained("roberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def test_space_encoding(self):
        """Behavior of add_prefix_space and space handling around specials."""
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        """Pretokenized inputs are not supported by this tokenizer's tests."""
        pass

    def test_embeded_special_tokens(self):
        """Slow and fast tokenizers must agree on a sentence embedding a special token."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )

    def test_change_add_prefix_space_and_trim_offsets_args(self):
        """Constructor kwargs must be reflected in the serialized backend state."""
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)

    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        """Offsets must adapt to every add_prefix_space / trim_offsets combination."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
| 223 | 0 |
"""Rename legacy UNet config keys and checkpoint parameter prefixes to the
current diffusers naming scheme, then re-save the model in place.

NOTE(review): the previous version was machine-scrambled — every assignment
was bound to `snake_case__` while the code read `args`, `config`, `model`,
etc., and the class names were garbled (`UNetaDModel`). Restored.
"""
import argparse
import json
import os

import torch
from transformers.file_utils import has_file

from diffusers import UNet2DConditionModel, UNet2DModel


# Which of the three rewrite passes to run.
do_only_config = False
do_only_weights = True
do_only_renaming = False


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--repo_path",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    # Old config key -> new config key.
    config_parameters_to_change = {
        "image_size": "sample_size",
        "num_res_blocks": "layers_per_block",
        "block_channels": "block_out_channels",
        "down_blocks": "down_block_types",
        "up_blocks": "up_block_types",
        "downscale_freq_shift": "freq_shift",
        "resnet_num_groups": "norm_num_groups",
        "resnet_act_fn": "act_fn",
        "resnet_eps": "norm_eps",
        "num_head_channels": "attention_head_dim",
    }

    # Old state-dict top-level prefix -> new prefix.
    key_parameters_to_change = {
        "time_steps": "time_proj",
        "mid": "mid_block",
        "downsample_blocks": "down_blocks",
        "upsample_blocks": "up_blocks",
    }

    # Standalone UNet repos keep config.json at the root; pipelines nest it under "unet".
    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        # Drop legacy keys so the model is instantiated from the new names only.
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        # Move values from old config keys to the new ones, then strip the
        # legacy "UNetRes" prefix from block type names.
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

        config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
        config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            # Legacy conv "op" aliases are dropped entirely.
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    # Rewrite only the leading prefix, keep the tail of the key.
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 274 | '''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __SCREAMING_SNAKE_CASE(ProcessorMixin):
    """Processor wrapping an image processor and a CLIP tokenizer into one API
    (CLIPSeg-style: supports text prompts, visual prompts, or both with images).

    NOTE(review): the previous version was machine-scrambled — `__call__`'s
    parameters were all named `snake_case_` while the body read `text`,
    `visual_prompt` and `images`, and four distinct methods collided on one
    name. Restored to the canonical implementation.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        # Accept the deprecated `feature_extractor` kwarg as an alias.
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
        """Prepare model inputs from text XOR a visual prompt, optionally with images.

        Returns a BatchEncoding; raises ValueError if neither text, visual
        prompt nor images are given, or if both text and visual prompt are.
        """
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")

        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        """Deprecated alias for `image_processor_class`."""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        """Deprecated alias for `image_processor`."""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 274 | 1 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class a_(unittest.TestCase):
    """Slow integration tests for the Flax Stable Diffusion ControlNet pipeline.

    NOTE(review): the previous version was machine-scrambled — both test
    methods and tearDown collided on one name, every local was bound to a
    throwaway identifier, and keyword-argument values were replaced by
    `snake_case_`. Restored to the canonical diffusers test.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_canny(self):
        """ControlNet guided by a Canny-edge conditioning image."""
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_pose(self):
        """ControlNet guided by an OpenPose conditioning image."""
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 309 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class a_ (PipelineTool ):
    """Agent tool that reads English text out loud with SpeechT5 + HiFi-GAN.

    NOTE(review): the base class placeholder `_a` was undefined; `PipelineTool`
    is imported above and is clearly the intended base. Class attributes and
    methods all shared one placeholder name, so only the last of each survived;
    the real names (`setup`/`encode`/`forward`/`decode`) are restored.
    """

    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechTaProcessor
    model_class = SpeechTaForTextToSpeech
    post_processor_class = SpeechTaHifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        """Default the vocoder checkpoint before the generic tool setup runs."""
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        """Tokenize *text* and resolve a speaker embedding.

        Falls back to a CMU-Arctic x-vector (entry 7305) when no speaker
        embedding is passed; this requires the `datasets` library.
        """
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")
            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)
        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        """Generate the raw spectrogram/speech tokens without tracking gradients."""
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        """Vocode the model output to a CPU waveform tensor."""
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module):
    """Return True if *module* is an artifact of ``torch.compile()``.

    NOTE(review): the previous version probed the *module* for ``_dynamo``;
    the availability check belongs on ``torch`` itself (PyTorch >= 2.0).
    """
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)
def extract_model_from_parallel(model, keep_fp32_wrapper=True):
    """Return the innermost model, unwrapping DDP/DataParallel/DeepSpeed and torch.compile shells.

    Args:
        model: the (possibly wrapped) model.
        keep_fp32_wrapper: when ``False``, also strip the mixed-precision
            ``forward`` wrapper and undo any transformer-engine conversion.

    Returns:
        The unwrapped model (re-attached to its compiled shell when the input
        was a ``torch.compile`` module, so compilation is preserved).
    """
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod  # the eager module underneath the compiled shell

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            # peel decorator layers until we reach the pre-wrap forward
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        # re-attach the unwrapped module and hand back the compiled shell
        compiled_model._orig_mod = model
        model = compiled_model

    return model
def wait_for_everyone():
    """Block until every process in the distributed group reaches this point."""
    PartialState().wait_for_everyone()
def save(obj, f):
    """Save *obj* to *f* with the backend-appropriate writer.

    Uses ``xm.save`` on TPU; elsewhere only the local main process writes,
    so every rank can call this unconditionally.
    """
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)
@contextmanager
def patch_environment(**kwargs):
    """Temporarily set environment variables (upper-cased keys), removing them on exit.

    NOTE(review): the previous body discarded the value instead of writing it
    into ``os.environ``, so nothing was ever patched.
    """
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)
    yield
    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
def get_pretty_name(obj):
    """Return a readable name for *obj*: its qualname, name, class name, or ``str()``."""
    # Instances usually carry neither attribute; fall back to their class.
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)
def merge_dicts(source, destination):
    """Recursively merge *source* into *destination* in place and return it.

    Nested dicts are merged key-by-key; scalar values from *source* overwrite.
    """
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value
    return destination
def is_port_in_use(port=None):
    """Return True if a TCP server is accepting connections on ``localhost:port``.

    Defaults to 29500, the conventional torch distributed rendezvous port.
    """
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
"""simple docstring"""
def counting_sort(collection):
    """Sort a list of integers with counting sort (stable, O(n + k))."""
    # if the collection is empty, returns empty
    if collection == []:
        return []

    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)

    # create the counting array: one slot per distinct possible value
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # count how often each number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1

    # prefix sums: counting_arr[i] now tells how many elements <= i + coll_min
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # place the elements in the output, walking from the end so equal
    # elements keep their original order (stable sort)
    ordered = [0] * coll_len
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered


def counting_sort_string(string):
    """Counting-sort the characters of *string* by code point."""
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])
if __name__ == "__main__":
    # Test string sort
    assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"

    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(counting_sort(unsorted))
import string
import numpy
def greatest_common_divisor(a, b):
    """Euclidean GCD; used by HillCipher to verify the key determinant is co-prime with 36."""
    return b if a == 0 else greatest_common_divisor(b % a, a)
class HillCipher:
    """Hill cipher over the 36-character alphabet A-Z0-9.

    The encryption key is a square integer matrix; its determinant must be
    co-prime with 36 for a decryption key to exist.
    """

    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key):
        """Store the key mod 36 and validate it; break_key is the block size."""
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter):
        """Map a key-string character to its numeric value (A=0 ... 9=35)."""
        return self.key_string.index(letter)

    def replace_digits(self, num):
        """Map a numeric value back to its key-string character."""
        return self.key_string[round(num)]

    def check_determinant(self):
        """Raise ValueError unless det(key) mod 36 is co-prime with 36."""
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text):
        """Upper-case *text*, drop characters outside the alphabet, and pad
        with the last character until the length is a multiple of break_key.

        NOTE(review): an all-invalid/empty input hits ``chars[-1]`` and raises
        IndexError, mirroring the original behavior.
        """
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)

    def encrypt(self, text):
        """Encrypt *text* block-by-block: c = K.v mod 36 for each column vector v."""
        text = self.process_text(text.upper())
        encrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = "".join(self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch
        return encrypted

    def make_decrypt_key(self):
        """Build the modular inverse of the key: det_inv * det * K^-1 mod 36."""
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        # brute-force the modular inverse of det modulo 36
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )
        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text):
        """Decrypt *text* block-by-block with the inverse key."""
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch
        return decrypted
def main():
    """Interactively build a Hill cipher key and encrypt or decrypt user text."""
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class _a :
    """Mixin with serialization round-trip tests for feature extractors.

    Consumers must set ``feature_extraction_class`` and provide
    ``feat_extract_dict`` (and inherit from ``unittest.TestCase``).
    """

    feature_extraction_class = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class lowercase_ ( unittest.TestCase ):
    """Unit tests for ``DisjunctiveConstraint`` input validation and stepping."""

    def test_input_types(self):
        # token_ids must be a (nested) list of plain python ints.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # One constraint must not be a complete subset of another.
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
# Which seq2seq answer model to use ("bart" or "t5") and whether to load the
# dense FAISS retrieval index; both are read by the cached loaders below.
MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    """Load the question retriever and the seq2seq answer model onto GPU.

    Returns (qar_tokenizer, qar_model, sas_tokenizer, sas_model); retriever
    parts are None when the dense index is disabled.
    """
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeqaSeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        _ = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_sas_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    """Load the Wiki40b passages + GPU FAISS index and an ElasticSearch client.

    Returns (wiki40b_passages, wiki40b_gpu_index_flat, es_client); the dense
    parts are None when LOAD_DENSE_INDEX is False.
    """
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    """Load the ELI5 training split and a FAISS index over its question embeddings."""
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
# Module-level resources shared by the retrieval/answering helpers below.
wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    """Return the *n_results* ELI5 training examples whose question is nearest to *question*."""
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def make_support(question, source="wiki40b", method="dense", n_results=10):
    """Retrieve supporting passages for *question*.

    Returns ``(question_doc, support_list)`` where *question_doc* is the
    "question: ... context: ..." string fed to the generator and
    *support_list* holds (title, section, score, passage) tuples.
    """
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wiki40b_passages, wiki40b_gpu_index_flat, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question, es_client, index_name="english_wiki40b_snippets_100w", n_results=n_results
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda x: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda x: None),
    }
)
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    """Generate one answer for *question_doc* with the seq2seq model.

    NOTE(review): the returned ``support_list`` is the module-level global set
    by the UI before this is called — preserved as-is; verify against caller.
    """
    with torch.no_grad():
        answer = qa_sas_generate(
            question_doc,
            sas_model,
            sas_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1_024,
            device="cuda:0",
        )[0]
    return (answer, support_list)
st.title("Long Form Question Answering with ELI5")

# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
    header_html,
)
st.sidebar.markdown(
    header_full,
    unsafe_allow_html=True,
)

# Long Form QA with ELI5 and Wikipedia
description = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)

action_list = [
    "Answer the question",
    "View the retrieved document only",
    "View the most similar ELI5 question and answer",
    "Show me everything, please!",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True

retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"

# Generation defaults; the sidebar below may override them.
sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None

# start main text
questions_list = [
    "<MY QUESTION>",
    "How do people make chocolate?",
    "Why do we get a fever when we are sick?",
    "How can different animals perceive different colors?",
    "What is natural language processing?",
    "What's the best way to treat a sunburn?",
    "What exactly are vitamins ?",
    "How does nuclear energy provide electricity?",
    "What's the difference between viruses and bacteria?",
    "Why are flutes classified as woodwinds when most of them are made out of metal ?",
    "Why do people like drinking coffee even though it tastes so bad?",
    "What happens when wine ages? How does it make the wine taste better?",
    "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
    "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
    "How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s

if st.button("Show me!"):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            # Merge dense and sparse hits, de-duplicated, keeping the top 10.
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            sas_model,
            sas_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("### The model generated answer is:")
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
        for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
            st.markdown(
                "{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    "> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
                )
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        answers_st = [
            "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))

disclaimer = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
'''simple docstring'''
from collections import defaultdict
class AssignmentUsingBitmask:
    """Count the ways to assign distinct tasks to persons with a bitmask DP.

    ``task_performed[p]`` lists the task numbers person ``p`` can perform.
    """

    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)

        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]

        self.task = defaultdict(list)  # stores the list of persons for each task

        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        # if mask == self.final_mask all persons are distributed tasks, return 1
        if mask == self.final_mask:
            return 1

        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0

        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # Number of ways when we don't use this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)

        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue

                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        # save the value.
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        # Store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)

        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)
if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)

    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
from collections import defaultdict
def lowerCamelCase(first_str: str, second_str: str) -> bool:
    """Return True when the two strings are anagrams of each other.

    Comparison is case-insensitive and ignores surrounding and internal spaces.
    """
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0 — seed the defaultdict with `int`,
    # not with one of the inputs as the original (broken) version did.
    count: defaultdict[str, int] = defaultdict(int)

    # For each character, increment for the first string and decrement for the
    # second; anagrams leave every counter at exactly zero.
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    # The original called undefined `check_anagrams`; the checker above is
    # `lowerCamelCase`.
    status = lowerCamelCase(input_a, input_b)
    print(f"""{input_a} and {input_b} are {"" if status else "not "}anagrams.""")
| 239 | 1 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
# True when the optional `s3fs` dependency is installed (checked lazily so the
# package imports without it).
_has_safs = importlib.util.find_spec("s3fs") is not None

if _has_safs:
    # NOTE(review): module/class names look mangled — confirm this relative
    # import target exists in the package.
    from .safilesystem import SaFileSystem  # noqa: F401

# Compression filesystems shipped with the library; registered with fsspec
# below so paths like "gzip://..." resolve.
COMPRESSION_FILESYSTEMS = [
    compression.BzaFileSystem,
    compression.GzipFileSystem,
    compression.LzaFileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def A__(dataset_path) -> str:
    """Strip the protocol prefix (e.g. ``s3://``) from ``dataset_path``, if present."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path
def A__(fs) -> bool:
    """Return True when ``fs`` is a non-local (remote) fsspec filesystem."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False
def A__(fs, src, dst) -> None:
    """Move ``src`` to ``dst`` on filesystem ``fs``.

    Local filesystems use `shutil.move`; remote filesystems use `fs.mv`.
    """
    # NOTE(review): `is_remote_filesystem` is not defined under that name in
    # this (name-mangled) module — confirm the helper's actual name.
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)
def A__() -> None:
    """Reset fsspec's asyncio machinery (event loop, IO thread, lock).

    Needed after a fork: the inherited loop/lock would otherwise deadlock.
    """
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        # Older fsspec: reset the module-level state by hand.  The original
        # version assigned these values to dead locals, doing nothing.
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
| 360 |
def A__(word, max_width) -> list:
    """Fully justify the words of ``word`` into lines of exactly ``max_width``.

    word      -- text to justify (split on whitespace)
    max_width -- exact width of every returned line

    >>> A__("This is an example of text justification.", 16)
    ['This    is    an', 'example  of text', 'justification.  ']
    """
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        # Total spaces this line still needs to reach max_width.
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list = []
    width = 0
    for inner_word in words:
        if width + len(inner_word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without spaces)
            # len(line) = minimum spaces needed between the words so far
            line.append(inner_word)
            width += len(inner_word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [inner_word], len(inner_word)
    # Last line is left-justified: single spaces between words, padded right.
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 223 | 0 |
"""simple docstring"""
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# SentencePiece word-boundary marker; referenced by the tokenizer tests below,
# which otherwise raise NameError (the original bound both values to one
# throwaway name).
SPIECE_UNDERLINE = "▁"

# Path to the small sentencepiece model used as a test fixture.
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer test-suite for BigBird (slow + fast tokenizers)."""

    # Attributes read by TokenizerTesterMixin; the original bound all four to a
    # single shadowed name, breaking the mixin.
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def lowercase(self: int):
        """setUp: build a tokenizer from the fixture vocab and save it to tmpdir.

        NOTE(review): unittest expects this to be named ``setUp``; the mangled
        name means it is never invoked automatically — confirm intended name.
        """
        super().setUp()

        tokenizer = self.tokenizer_class(get_tests_dir("fixtures/test_sentencepiece.model"), keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
def lowercase(self: "Any"):
    """`<s>` must round-trip through token<->id conversion as id 1."""
    token = "<s>"
    token_id = 1

    self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
    self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
def lowercase(self: "List[Any]"):
    """Check ordering of special tokens in the vocab and the full vocab size."""
    vocab_keys = list(self.get_tokenizer().get_vocab().keys())

    self.assertEqual(vocab_keys[0], "<unk>")
    self.assertEqual(vocab_keys[1], "<s>")
    self.assertEqual(vocab_keys[-1], "[MASK]")
    self.assertEqual(len(vocab_keys), 1004)
def lowercase ( self : Optional[int] ):
    """The fixture sentencepiece model declares exactly 1000 base entries."""
    self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def lowercase(self: "Dict"):
    """Slow (python) and fast (rust) tokenizers must agree on tokens and ids."""
    if not self.test_rust_tokenizer:
        return

    tokenizer = self.get_tokenizer()
    rust_tokenizer = self.get_rust_tokenizer()

    sequence = "I was born in 92000, and this is falsé."

    tokens = tokenizer.tokenize(sequence)
    rust_tokens = rust_tokenizer.tokenize(sequence)
    self.assertListEqual(tokens, rust_tokens)

    # Without special tokens.
    ids = tokenizer.encode(sequence, add_special_tokens=False)
    rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
    self.assertListEqual(ids, rust_ids)

    # With special tokens.
    rust_tokenizer = self.get_rust_tokenizer()
    ids = tokenizer.encode(sequence)
    rust_ids = rust_tokenizer.encode(sequence)
    self.assertListEqual(ids, rust_ids)
def lowercase(self: "Tuple"):
    """End-to-end tokenize / ids / back-conversion check on the fixture vocab."""
    tokenizer = BigBirdTokenizer(get_tests_dir("fixtures/test_sentencepiece.model"), keep_accents=True)

    tokens = tokenizer.tokenize("This is a test")
    self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

    self.assertListEqual(
        tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382],
    )

    tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
    self.assertListEqual(
        tokens,
        [
            SPIECE_UNDERLINE + "I",
            SPIECE_UNDERLINE + "was",
            SPIECE_UNDERLINE + "b",
            "or",
            "n",
            SPIECE_UNDERLINE + "in",
            SPIECE_UNDERLINE + "",
            "9",
            "2",
            "0",
            "0",
            "0",
            ",",
            SPIECE_UNDERLINE + "and",
            SPIECE_UNDERLINE + "this",
            SPIECE_UNDERLINE + "is",
            SPIECE_UNDERLINE + "f",
            "al",
            "s",
            "é",
            ".",
        ],
    )
    ids = tokenizer.convert_tokens_to_ids(tokens)
    self.assertListEqual(
        ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
    )

    # Unknown pieces ("9", "é") must come back as <unk>.
    back_tokens = tokenizer.convert_ids_to_tokens(ids)
    self.assertListEqual(
        back_tokens,
        [
            SPIECE_UNDERLINE + "I",
            SPIECE_UNDERLINE + "was",
            SPIECE_UNDERLINE + "b",
            "or",
            "n",
            SPIECE_UNDERLINE + "in",
            SPIECE_UNDERLINE + "",
            "<unk>",
            "2",
            "0",
            "0",
            "0",
            ",",
            SPIECE_UNDERLINE + "and",
            SPIECE_UNDERLINE + "this",
            SPIECE_UNDERLINE + "is",
            SPIECE_UNDERLINE + "f",
            "al",
            "s",
            "<unk>",
            ".",
        ],
    )
@cached_property
def lowercase ( self : Optional[Any] ):
    """Full pretrained BigBird tokenizer, shared by the slow integration tests."""
    return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
@slow
def lowercase(self: "List[Any]"):
    """Pretrained tokenizer must encode 'Hello World!' to the reference ids."""
    txt = "Hello World!"
    ids = [65, 18536, 2260, 101, 66]

    self.assertListEqual(ids, self.big_tokenizer.encode(txt))
@slow
def lowercase(self: int):
    """Long text with odd characters and unknown words must hit reference ids."""
    txt = (
        '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
        ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
    )
    # fmt: off
    ids = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66]  # noqa: E231
    # fmt: on
    self.assertListEqual(ids, self.big_tokenizer.encode(txt))
@require_torch
@slow
def lowercase(self: str):
    """encode_plus / batch_encode_plus outputs must be accepted by BigBirdModel."""
    import torch

    from transformers import BigBirdConfig, BigBirdModel

    # Build sequence
    first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
    sequence = " ".join(first_ten_tokens)
    encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
    batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
        [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
    )

    config = BigBirdConfig(attention_type="original_full")
    model = BigBirdModel(config)

    assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

    with torch.no_grad():
        model(**encoded_sequence)
        model(**batch_encoded_sequence)
@slow
def lowercase(self: "Optional[Any]"):
    """Decoding must not insert a space before [MASK] (known BigBird behavior)."""
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
    decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK].").input_ids)

    self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]")
@slow
def lowercase(self: "Tuple"):
    """Full integration check against a pinned model revision's encoding."""
    # fmt: off
    expected_encoding = {'''input_ids''': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
    # fmt: on
    self.tokenizer_integration_test_util(
        expected_encoding=expected_encoding, model_name='''google/bigbird-roberta-base''', revision='''215c99f1600e06f83acce68422f2035b2b5c3510''', )
| 288 |
"""simple docstring"""
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def _UpperCAmelCase(__lowerCamelCase: int = 3) -> "qiskit.result.counts.Counts":
    """Build an n-qubit quantum Fourier transform circuit and simulate it.

    __lowerCamelCase -- number of qubits (default 3); must be a positive
                        exact integer no larger than 10.
    Returns the measurement counts from 10000 shots on the qasm simulator.
    Raises TypeError for string input, ValueError for non-positive,
    non-integral, or too-large qubit counts.
    """
    number_of_qubits = __lowerCamelCase
    # Reject strings explicitly (the original compared the value against
    # itself, which is always a TypeError at runtime).
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be a integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")
    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits
    for i in range(number_of_qubits):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        # Controlled phase rotations between the remaining qubits.
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    # Reverse qubit order with swaps to finish the QFT.
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
    # The original called undefined `quantum_fourier_transform`; the QFT
    # function above is `_UpperCAmelCase`.
    print(
        f"Total count for quantum fourier transform state is: \
{_UpperCAmelCase(3)}"
    )
| 288 | 1 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _UpperCAmelCase ( __lowerCamelCase : str ) -> List[str]:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4e_00 and cp <= 0X9f_ff)
or (cp >= 0X34_00 and cp <= 0X4d_bf) #
or (cp >= 0X2_00_00 and cp <= 0X2_a6_df) #
or (cp >= 0X2_a7_00 and cp <= 0X2_b7_3f) #
or (cp >= 0X2_b7_40 and cp <= 0X2_b8_1f) #
or (cp >= 0X2_b8_20 and cp <= 0X2_ce_af) #
or (cp >= 0Xf9_00 and cp <= 0Xfa_ff)
or (cp >= 0X2_f8_00 and cp <= 0X2_fa_1f) #
): #
return True
return False
def _UpperCAmelCase(word: str) -> int:
    """Return 1 when every character of ``word`` is a CJK character, else 0."""
    # word like '180' or '身高' or '神'
    # NOTE(review): `_is_chinese_char` is not defined under that name in this
    # name-mangled module — confirm the helper's actual name.
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def _UpperCAmelCase(tokens: "List[str]") -> list:
    """Collect the multi-character all-Chinese words among ``tokens``."""
    word_set = set()
    # NOTE(review): `is_chinese` is not defined under that name in this
    # name-mangled module — confirm the helper's actual name.
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def _UpperCAmelCase(bert_tokens: "List[str]", chinese_word_set: set) -> list:
    """Mark BERT sub-tokens of whole Chinese words with a '##' prefix, in place.

    bert_tokens      -- tokens produced by BertTokenizer for one line
    chinese_word_set -- Chinese words found by LTP word segmentation
    """
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        # NOTE(review): `is_chinese` is not defined under that name in this
        # name-mangled module — confirm the helper's actual name.
        if is_chinese(bert_word[start]):
            # Try the longest possible match first.
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    # Prefix every sub-token after the first with '##'.
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def _UpperCAmelCase(lines: "List[str]", ltp_tokenizer, bert_tokenizer) -> list:
    """For each line, return positions of BERT '##' sub-tokens that belong to a
    whole Chinese word found by LTP segmentation (whole-word-masking refs).

    NOTE(review): `get_chinese_word`, `add_sub_symbol` and `_is_chinese_char`
    are not defined under those names in this name-mangled module — confirm.
    """
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids
def _UpperCAmelCase(args) -> None:
    """Read LM training lines, compute whole-word-masking refs, write JSON lines."""
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    # NOTE(review): `prepare_ref` is not defined under that name in this
    # name-mangled module — confirm the helper's actual name.
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    # The original never bound `parser`/`args`; the main entry point defined
    # last above is `_UpperCAmelCase`.
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        required=False,
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp",
        required=False,
        type=str,
        default="./resources/ltp",
        help="resources for LTP tokenizer, usually a path",
    )
    parser.add_argument(
        "--bert",
        required=False,
        type=str,
        default="./resources/robert",
        help="resources for Bert tokenizer",
    )
    parser.add_argument(
        "--save_path",
        required=False,
        type=str,
        default="./resources/ref.txt",
        help="path to save res",
    )
    args = parser.parse_args()
    _UpperCAmelCase(args)
| 351 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}
class lowerCAmelCase__(A_):
    """RoBERTa model configuration (mirrors the upstream RobertaConfig)."""

    __a = "roberta"  # model type identifier

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        # Special-token ids are forwarded to the base config.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class lowerCAmelCase__(A_):
    """ONNX export configuration for RoBERTa."""

    @property
    def lowercase(self: "Dict"):
        """Dynamic input axes for ONNX export, depending on the current task."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 40 | 0 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position

# Package version; the original bound it to a throwaway name.
__version__ = "2.13.1"

import platform

import pyarrow
from packaging import version

if version.parse(platform.python_version()) < version.parse("3.7"):
    raise ImportWarning(
        "To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
    )

if version.parse(pyarrow.__version__).major < 8:
    raise ImportWarning(
        "To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
        "If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
    )

# Keep the module namespace clean.
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
# Re-export a few names on the deprecated modules for backward compatibility;
# the original collapsed these attribute assignments into dead rebindings of
# one module-level name.
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager

del _arrow_dataset, _utils, _deprecated_download_manager
| 19 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def UpperCamelCase(tensor_list) -> bool:
    """Return True when every tensor/array in ``tensor_list`` has the same shape.

    An empty or single-element list is trivially uniform.
    """
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
class lowerCAmelCase_(PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Fast tests for StableDiffusionLatentUpscalePipeline."""

    # Attributes read by the tester mixins; the original bound all of them to a
    # single shadowed name.
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    test_cpu_offload = True
@property
def snake_case_(self):
    """Deterministic random latent of shape (1, 4, 16, 16) on the test device."""
    batch_size = 1
    num_channels = 4
    sizes = (16, 16)

    image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
    return image
def snake_case_ ( self ) -> int:
    """Build tiny UNet/VAE/scheduler/CLIP components for the fast tests.

    NOTE(review): `SCREAMING_SNAKE_CASE_` below is an undefined placeholder left
    by name mangling (originally literal values such as None/True/False), and
    the assignment targets are dead names — the real targets are the values
    placed into the returned dict.  Restore from upstream before running.
    """
    torch.manual_seed(0 )
    # UNet sized down for speed; placeholder args flagged above.
    UpperCamelCase : int = UNetaDConditionModel(
        act_fn='gelu', attention_head_dim=8, norm_num_groups=SCREAMING_SNAKE_CASE_, block_out_channels=[32, 32, 64, 64], time_cond_proj_dim=160, conv_in_kernel=1, conv_out_kernel=1, cross_attention_dim=32, down_block_types=(
        'KDownBlock2D',
        'KCrossAttnDownBlock2D',
        'KCrossAttnDownBlock2D',
        'KCrossAttnDownBlock2D',
    ), in_channels=8, mid_block_type=SCREAMING_SNAKE_CASE_, only_cross_attention=SCREAMING_SNAKE_CASE_, out_channels=5, resnet_time_scale_shift='scale_shift', time_embedding_type='fourier', timestep_post_act='gelu', up_block_types=('KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KUpBlock2D'), )
    # Small VAE matching the UNet's latent channels.
    UpperCamelCase : Tuple = AutoencoderKL(
        block_out_channels=[32, 32, 64, 64], in_channels=3, out_channels=3, down_block_types=[
        'DownEncoderBlock2D',
        'DownEncoderBlock2D',
        'DownEncoderBlock2D',
        'DownEncoderBlock2D',
    ], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, )
    # Scheduler predicting the sample directly.
    UpperCamelCase : Tuple = EulerDiscreteScheduler(prediction_type='sample' )
    # Tiny CLIP text encoder configuration.
    UpperCamelCase : Optional[int] = CLIPTextConfig(
        bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='quick_gelu', projection_dim=512, )
    UpperCamelCase : Any = CLIPTextModel(SCREAMING_SNAKE_CASE_ )
    UpperCamelCase : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
    # Components consumed by the pipeline under test.
    UpperCamelCase : Optional[int] = {
        'unet': model.eval(),
        'vae': vae.eval(),
        'scheduler': scheduler,
        'text_encoder': text_encoder,
        'tokenizer': tokenizer,
    }
    return components
def snake_case_(self, device, seed=0) -> "List[str]":
    """Standard pipeline inputs for the fast tests.

    device -- torch device string; seed -- RNG seed for the generator
    """
    # MPS does not support device-bound generators.
    if str(device).startswith("mps"):
        generator = torch.manual_seed(seed)
    else:
        generator = torch.Generator(device=device).manual_seed(seed)
    inputs = {
        "prompt": "A painting of a squirrel eating a burger",
        "image": self.dummy_image.cpu(),
        "generator": generator,
        "num_inference_steps": 2,
        "output_type": "numpy",
    }
    return inputs
def snake_case_(self) -> "List[Any]":
    """Fast CPU smoke test: a 2-step run must reproduce the reference slice."""
    device = "cpu"
    components = self.get_dummy_components()
    pipe = self.pipeline_class(**components)
    pipe.to(device)
    pipe.set_progress_bar_config(disable=None)

    inputs = self.get_dummy_inputs(device)
    image = pipe(**inputs).images
    image_slice = image[0, -3:, -3:, -1]

    self.assertEqual(image.shape, (1, 256, 256, 3))
    expected_slice = np.array(
        [0.47_22_24_12, 0.41_92_16_33, 0.44_71_74_34, 0.46_87_41_92, 0.42_58_82_58, 0.46_15_07_26, 0.4_67_75_34, 0.45_58_38_32, 0.48_57_90_55]
    )
    max_diff = np.abs(image_slice.flatten() - expected_slice).max()
    self.assertLessEqual(max_diff, 1e-3)
# The following overrides only loosen the numeric tolerances of inherited
# mixin tests for this pipeline.
def snake_case_ ( self ) -> List[Any]:
    """Attention slicing: allow up to 7e-3 deviation."""
    super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 )
def snake_case_ ( self ) -> Tuple:
    """CPU offload: allow up to 3e-3 deviation."""
    super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 )
def snake_case_ ( self ) -> Dict:
    """Dict/tuple output equivalence: allow up to 3e-3 deviation."""
    super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def snake_case_ ( self ) -> Any:
    """Batched vs single inference: allow up to 7e-3 deviation."""
    super().test_inference_batch_single_identical(expected_max_diff=7e-3 )
def snake_case_ ( self ) -> List[str]:
    """pt/np/pil output equivalence: allow up to 3e-3 deviation."""
    super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 )
def snake_case_ ( self ) -> Optional[int]:
    """Save/load round-trip: allow up to 3e-3 deviation."""
    super().test_save_load_local(expected_max_difference=3e-3 )
def snake_case_ ( self ) -> int:
    """Save/load with optional components: allow up to 3e-3 deviation."""
    super().test_save_load_optional_components(expected_max_difference=3e-3 )
def snake_case_(self) -> "Optional[Any]":
    """Every supported Karras scheduler must produce same-shaped outputs."""
    skip_schedulers = [
        "DDIMScheduler",
        "DDPMScheduler",
        "PNDMScheduler",
        "HeunDiscreteScheduler",
        "EulerAncestralDiscreteScheduler",
        "KDPM2DiscreteScheduler",
        "KDPM2AncestralDiscreteScheduler",
        "DPMSolverSDEScheduler",
    ]
    components = self.get_dummy_components()
    pipe = self.pipeline_class(**components)

    # make sure that PNDM does not need warm-up
    pipe.scheduler.register_to_config(skip_prk_steps=True)

    pipe.to(torch_device)
    pipe.set_progress_bar_config(disable=None)
    inputs = self.get_dummy_inputs(torch_device)
    inputs["num_inference_steps"] = 2

    outputs = []
    for scheduler_enum in KarrasDiffusionSchedulers:
        if scheduler_enum.name in skip_schedulers:
            # no sigma schedulers are not supported
            continue
        scheduler_cls = getattr(diffusers, scheduler_enum.name)
        pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
        output = pipe(**inputs)[0]
        outputs.append(output)

    assert check_same_shape(outputs)
@require_torch_gpu
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
    """Slow integration tests for the latent-upscaler pipeline.

    Requires a CUDA GPU and network access (downloads pretrained pipelines
    and reference arrays from the Hugging Face Hub).

    NOTE(review): names look machine-mangled (`torch.floataa` is presumably
    `torch.float16`; assignments go to `UpperCamelCase` while later lines read
    `pipe`, `upscaler`, `image`, `expected_image`); confirm against the
    un-mangled test module.
    """
    def snake_case_ ( self ) -> Union[str, Any]:
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def snake_case_ ( self ) -> List[Any]:
        # Generate a latent with SD v1.4, upscale it, and compare against a
        # stored reference image by mean absolute difference.
        UpperCamelCase : Dict = torch.manual_seed(33 )
        UpperCamelCase : Union[str, Any] = StableDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4', torch_dtype=torch.floataa )
        pipe.to('cuda' )
        UpperCamelCase : Tuple = StableDiffusionLatentUpscalePipeline.from_pretrained(
            'stabilityai/sd-x2-latent-upscaler', torch_dtype=torch.floataa )
        upscaler.to('cuda' )
        UpperCamelCase : Union[str, Any] = 'a photo of an astronaut high resolution, unreal engine, ultra realistic'
        UpperCamelCase : int = pipe(SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, output_type='latent' ).images
        UpperCamelCase : List[str] = upscaler(
            prompt=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, num_inference_steps=20, guidance_scale=0, generator=SCREAMING_SNAKE_CASE_, output_type='np', ).images[0]
        UpperCamelCase : Optional[int] = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy' )
        assert np.abs((expected_image - image).mean() ) < 5e-2
    def snake_case_ ( self ) -> int:
        # Upscale a stored 512px image and compare against the stored 1024px
        # reference by max absolute difference.
        UpperCamelCase : List[Any] = torch.manual_seed(33 )
        UpperCamelCase : Union[str, Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
            'stabilityai/sd-x2-latent-upscaler', torch_dtype=torch.floataa )
        upscaler.to('cuda' )
        UpperCamelCase : Dict = 'the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'
        UpperCamelCase : List[str] = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png' )
        UpperCamelCase : str = upscaler(
            prompt=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, num_inference_steps=20, guidance_scale=0, generator=SCREAMING_SNAKE_CASE_, output_type='np', ).images[0]
        UpperCamelCase : Optional[Any] = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy' )
        assert np.abs((expected_image - image).max() ) < 5e-2
| 119 | 0 |
"""simple docstring"""
from __future__ import annotations
def _lowerCAmelCase ( lowerCAmelCase ):
    """Return the arithmetic mean of the numbers in ``lowerCAmelCase``.

    Raises:
        ValueError: if the sequence is empty (the mean is undefined).
    """
    # Fix: the previous body read the undefined names `nums` / `lowercase_`
    # instead of the function's own parameter, raising NameError on any call.
    if not lowerCAmelCase:
        raise ValueError("""List is empty""" )
    return sum(lowerCAmelCase ) / len(lowerCAmelCase )
if __name__ == "__main__":
    # Run any doctests in this module when executed directly.
    import doctest
    doctest.testmod()
| 357 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase_ : str = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : Optional[int] = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : Optional[Any] = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : Tuple = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
lowerCAmelCase_ : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 248 | 0 |
"""simple docstring"""
class snake_case_:
def __init__( self : str , UpperCamelCase_ : Union[str, Any] ):
lowerCAmelCase : List[Any] = val
lowerCAmelCase : Dict = None
lowerCAmelCase : Optional[int] = None
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : List[str] ):
if self.val:
if val < self.val:
if self.left is None:
lowerCAmelCase : Any = Node(UpperCamelCase_ )
else:
self.left.insert(UpperCamelCase_ )
elif val > self.val:
if self.right is None:
lowerCAmelCase : List[str] = Node(UpperCamelCase_ )
else:
self.right.insert(UpperCamelCase_ )
else:
lowerCAmelCase : Dict = val
def _snake_case ( root , res ):
    """Append the values of the BST rooted at ``root`` to ``res`` in
    ascending (in-order) order.  ``res`` is mutated in place; returns None.

    Fix: the previous signature declared the same name for both parameters
    (a SyntaxError) and recursed through the undefined name ``inorder``.
    Implemented iteratively with an explicit stack so the function does not
    depend on its module-level name.
    """
    stack = []
    node = root
    while stack or node is not None:
        # Walk down the left spine, then emit the node and move right.
        while node is not None:
            stack.append(node )
            node = node.left
        node = stack.pop()
        res.append(node.val )
        node = node.right
def _snake_case ( arr ):
    """Sort ``arr`` by inserting every element into a binary search tree and
    reading the tree back in order.  Returns a new sorted list; an empty
    input is returned unchanged.

    Fix: the previous body called the undefined names ``Node`` and
    ``inorder`` (the class in this file is ``snake_case_``) and discarded
    intermediate results into throwaway locals.
    """
    # Build BST
    if len(arr ) == 0:
        return arr
    root = snake_case_(arr[0] )
    for index in range(1 , len(arr ) ):
        root.insert(arr[index] )
    # Traverse BST in order (iteratively, so this function is self-contained).
    ordered = []
    stack = []
    node = root
    while stack or node is not None:
        while node is not None:
            stack.append(node )
            node = node.left
        node = stack.pop()
        ordered.append(node.val )
        node = node.right
    return ordered
if __name__ == "__main__":
    # Demo run of the BST sort defined above.  Fix: the previous code called
    # the undefined name `tree_sort`; the sort function in this (mangled)
    # module is `_snake_case`.
    print(_snake_case([10, 1, 3, 2, 9, 14, 13]))
| 60 |
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class snake_case_:
    """Multiply two polynomials with an iterative radix-2 FFT.

    Coefficients are listed lowest order first (index i is the coefficient of
    x**i).  After construction the product's coefficient list is available as
    ``self.product``.

    Fixes relative to the previous (mangled) version: attributes were assigned
    to throwaway locals and never stored on the instance; ``np.loga`` does not
    exist (``np.log2`` intended); both worker methods were named
    ``lowerCamelCase__`` (the second silently shadowed the first) while the
    constructor called the nonexistent private names ``__dft``/``__multiply``;
    the root of unity now comes from ``np.exp`` so the heavyweight ``mpmath``
    dependency is not needed at runtime; and ``__str__`` unpacked
    ``enumerate`` in the wrong order, printing indices as coefficients.
    """

    def __init__( self : str , poly_a=None , poly_b=None ):
        # Input as list; copy so the caller's lists are never mutated.
        self.polyA = list(poly_a or [0] )[:]
        self.polyB = list(poly_b or [0] )[:]

        # Remove trailing zero coefficients (keep at least one term so an
        # all-zero polynomial does not crash the strip loop).
        while self.polyA[-1] == 0 and len(self.polyA ) > 1:
            self.polyA.pop()
        self.len_A = len(self.polyA )

        while self.polyB[-1] == 0 and len(self.polyB ) > 1:
            self.polyB.pop()
        self.len_B = len(self.polyB )

        # Pad both polynomials with zeros up to the next power of two that
        # can hold every coefficient of the product.
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
        while len(self.polyA ) < self.c_max_length:
            self.polyA.append(0 )
        while len(self.polyB ) < self.c_max_length:
            self.polyB.append(0 )

        # Primitive c_max_length-th root of unity used by the transform.
        self.root = complex(np.exp(2j * np.pi / self.c_max_length ) )

        # The coefficient list of polyA * polyB.
        self.product = self.__multiply()

    def __dft( self , which ):
        """Return the DFT of polyA ('A') or polyB ('B') as a flat list of
        spectrum values ordered by increasing power of the root."""
        if which == '''A''':
            dft = [[x] for x in self.polyA]
        else:
            dft = [[x] for x in self.polyB]
        # Corner case: a single coefficient is its own transform.
        if len(dft ) <= 1:
            return dft[0]
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for _ in range(next_ncol )]
            root = self.root ** next_ncol
            # First half of next step: butterflies with a + sign.
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2 ) ):
                for i in range(next_ncol ):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
                current_root *= root
            # Second half of next step: butterflies with a - sign.
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2 ) ):
                for i in range(next_ncol ):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
                current_root *= root
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    def __multiply( self ):
        """Pointwise-multiply the two spectra, then apply the inverse FFT and
        return the real coefficient list of the product."""
        dft_a = self.__dft('''A''' )
        dft_b = self.__dft('''B''' )
        inverse_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
        del dft_a
        del dft_b
        # Corner case: a degree-zero product needs no inverse transform.
        if len(inverse_c[0] ) <= 1:
            return inverse_c[0]
        # Inverse DFT, undoing the butterflies level by level.
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for _ in range(next_ncol )]
            root = self.root ** (next_ncol // 2 )
            current_root = 1
            for j in range(self.c_max_length // next_ncol ):
                for i in range(next_ncol // 2 ):
                    # Even positions
                    new_inverse_c[i].append(
                        (inverse_c[i][j] + inverse_c[i][j + self.c_max_length // next_ncol] ) / 2 )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (inverse_c[i][j] - inverse_c[i][j + self.c_max_length // next_ncol] )
                        / (2 * current_root ) )
                current_root *= root
            inverse_c = new_inverse_c
            next_ncol *= 2
        # Round away floating-point noise and unpack the single-entry rows.
        coefficients = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverse_c]
        # Remove trailing zero coefficients (keep at least one term).
        while coefficients[-1] == 0 and len(coefficients ) > 1:
            coefficients.pop()
        return coefficients

    def __str__( self : int ):
        """Human-readable rendering of both operands and their product."""
        a = '''A = ''' + ''' + '''.join(
            F'''{coef}*x^{i}''' for i, coef in enumerate(self.polyA[: self.len_A] ) )
        b = '''B = ''' + ''' + '''.join(
            F'''{coef}*x^{i}''' for i, coef in enumerate(self.polyB[: self.len_B] ) )
        c = '''A*B = ''' + ''' + '''.join(
            F'''{coef}*x^{i}''' for i, coef in enumerate(self.product ) )
        return F'''{a}\n{b}\n{c}'''
# Unit tests
if __name__ == "__main__":
    # Run any doctests in this module when executed directly.
    import doctest
    doctest.testmod()
| 60 | 1 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
# Number of synthetic examples generated for the speed test.
_snake_case : Tuple = 500_000
# NOTE(review): the next line unpacks os.path.split(__file__) into the same
# name twice, and the join below reads RESULTS_BASEPATH / RESULTS_FILENAME,
# which are never defined in this file — presumably the un-mangled names of
# `_snake_case`; verify against the original benchmark script.
_snake_case , _snake_case : str = os.path.split(__file__)
_snake_case : Optional[int] = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def lowerCAmelCase_ ( dataset , **kwargs ):
    """Run ``dataset.map`` once with the given kwargs.

    The ``@get_duration`` decorator turns the call into a timing measurement.
    Fix: the previous signature declared the same name for both parameters
    (a SyntaxError) and the body read names the signature never bound.
    """
    _ = dataset.map(**kwargs )
@get_duration
def lowerCAmelCase_ ( dataset , **kwargs ):
    """Run ``dataset.filter`` once with the given kwargs.

    The ``@get_duration`` decorator turns the call into a timing measurement.
    Fix: the previous signature declared the same name for both parameters
    (a SyntaxError) and the body read names the signature never bound.
    """
    _ = dataset.filter(**kwargs )
def lowerCAmelCase_ ( ):
    """Benchmark Dataset.map / Dataset.filter over a generated dataset in
    several output formats (numpy / pandas / torch / tensorflow) and dump the
    timings as JSON.

    NOTE(review): this body looks machine-mangled — results are assigned to
    `__snake_case` while later lines read `dataset` / `tokenizer`, and the
    bare builtin `map` / `filter` are called with placeholder arguments;
    presumably these were the two timed wrappers defined above.  Verify
    against the un-mangled benchmark before relying on details.
    """
    __snake_case : str = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        # Synthetic dataset with one text and one float column.
        __snake_case : List[str] = datasets.Features({"text": datasets.Value("string" ), "numbers": datasets.Value("float32" )} )
        __snake_case : List[str] = generate_example_dataset(
            os.path.join(__lowerCAmelCase , "dataset.arrow" ) , __lowerCAmelCase , num_examples=__lowerCAmelCase )
        __snake_case : str = transformers.AutoTokenizer.from_pretrained("bert-base-cased" , use_fast=__lowerCAmelCase )
        def tokenize(__lowerCamelCase ):
            # Tokenize the text column of a batch of examples.
            return tokenizer(examples["text"] )
        __snake_case : int = map(__lowerCAmelCase )
        __snake_case : Optional[int] = map(__lowerCAmelCase , batched=__lowerCAmelCase )
        __snake_case : List[str] = map(__lowerCAmelCase , function=lambda __lowerCamelCase : None , batched=__lowerCAmelCase )
        with dataset.formatted_as(type="numpy" ):
            __snake_case : Optional[int] = map(__lowerCAmelCase , function=lambda __lowerCamelCase : None , batched=__lowerCAmelCase )
        with dataset.formatted_as(type="pandas" ):
            __snake_case : int = map(__lowerCAmelCase , function=lambda __lowerCamelCase : None , batched=__lowerCAmelCase )
        with dataset.formatted_as(type="torch" , columns="numbers" ):
            __snake_case : Optional[Any] = map(__lowerCAmelCase , function=lambda __lowerCamelCase : None , batched=__lowerCAmelCase )
        with dataset.formatted_as(type="tensorflow" , columns="numbers" ):
            __snake_case : Optional[int] = map(__lowerCAmelCase , function=lambda __lowerCamelCase : None , batched=__lowerCAmelCase )
        __snake_case : Optional[Any] = map(__lowerCAmelCase , function=__lowerCAmelCase , batched=__lowerCAmelCase )
        __snake_case : Dict = filter(__lowerCAmelCase )
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #   times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
        with open(__lowerCAmelCase , "wb" ) as f:
            f.write(json.dumps(__lowerCAmelCase ).encode("utf-8" ) )
if __name__ == "__main__":  # useful to run the profiler
    # Fix: the previous line called the undefined name `benchmark_map_filter`
    # and carried trailing junk tokens (a SyntaxError).  The benchmark entry
    # point defined last in this (mangled) module is `lowerCAmelCase_`.
    lowerCAmelCase_()
from ..utils import DummyObject, requires_backends
class a (metaclass=_lowerCAmelCase ):
"""simple docstring"""
__UpperCAmelCase : int = ["speech"]
def __init__( self : List[Any] , *lowerCamelCase : List[Any] , **lowerCamelCase : Optional[Any] ) -> Dict:
requires_backends(self , ["speech"] )
class a (metaclass=_lowerCAmelCase ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = ["speech"]
def __init__( self : int , *lowerCamelCase : List[Any] , **lowerCamelCase : List[Any] ) -> Optional[int]:
requires_backends(self , ["speech"] )
| 134 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class snake_case ( unittest.TestCase ):
    """Tests for the MGP-STR processor: save/load round-trips, image/text
    preprocessing, decoding, and model_input_names.

    NOTE(review): identifiers look machine-mangled — every test method is
    named `UpperCAmelCase__`, results are bound to `a_` while later lines
    read names like `processor` / `tokenizer` / `image_processor`, and many
    calls pass the bare placeholder `__UpperCAmelCase`.  Verify behavior
    details against the un-mangled processor test module.
    """
    # Only exercise the image processor when the vision backend is available.
    a_ : Any = ViTImageProcessor if is_vision_available() else None
    @property
    def UpperCAmelCase__ ( self) ->int:
        return self.image_processor_tester.prepare_image_processor_dict()
    def UpperCAmelCase__ ( self) ->str:
        # setUp: write a character vocab and an image-processor config into a
        # temp dir that the from_pretrained tests below can load.
        a_ = (3, 32, 1_28)
        a_ = tempfile.mkdtemp()
        # fmt: off
        a_ = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        a_ = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase))))
        a_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file , "w" , encoding="utf-8") as fp:
            fp.write(json.dumps(__UpperCAmelCase) + "\n")
        a_ = {
        'do_normalize': False,
        'do_resize': True,
        'image_processor_type': 'ViTImageProcessor',
        'resample': 3,
        'size': {'height': 32, 'width': 1_28},
        }
        a_ = os.path.join(self.tmpdirname , __UpperCAmelCase)
        with open(self.image_processor_file , "w" , encoding="utf-8") as fp:
            json.dump(__UpperCAmelCase , __UpperCAmelCase)
    def UpperCAmelCase__ ( self , **__UpperCAmelCase) ->List[str]:
        # Helper: tokenizer loaded from the temp dir.
        return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase)
    def UpperCAmelCase__ ( self , **__UpperCAmelCase) ->int:
        # Helper: image processor loaded from the temp dir.
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase)
    def UpperCAmelCase__ ( self) ->int:
        # tearDown: drop the temp dir.
        shutil.rmtree(self.tmpdirname)
    def UpperCAmelCase__ ( self) ->Optional[int]:
        # Helper: a random RGB PIL image input.
        a_ = np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta)
        a_ = Image.fromarray(np.moveaxis(__UpperCAmelCase , 0 , -1))
        return image_input
    def UpperCAmelCase__ ( self) ->Optional[int]:
        # save_pretrained / from_pretrained round-trip with default kwargs.
        a_ = self.get_tokenizer()
        a_ = self.get_image_processor()
        a_ = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase)
        processor.save_pretrained(self.tmpdirname)
        a_ = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=__UpperCAmelCase)
        self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer , __UpperCAmelCase)
        self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor , __UpperCAmelCase)
    def UpperCAmelCase__ ( self) ->Union[str, Any]:
        # Round-trip with additional kwargs overriding tokens / normalization.
        a_ = self.get_tokenizer()
        a_ = self.get_image_processor()
        a_ = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase)
        processor.save_pretrained(self.tmpdirname)
        a_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)")
        a_ = self.get_image_processor(do_normalize=__UpperCAmelCase , padding_value=1.0)
        a_ = MgpstrProcessor.from_pretrained(
            self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__UpperCAmelCase , padding_value=1.0)
        self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer , __UpperCAmelCase)
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor , __UpperCAmelCase)
    def UpperCAmelCase__ ( self) ->Optional[Any]:
        # Processor(images=...) must match the bare image processor's output.
        a_ = self.get_image_processor()
        a_ = self.get_tokenizer()
        a_ = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase)
        a_ = self.prepare_image_inputs()
        a_ = image_processor(__UpperCAmelCase , return_tensors="np")
        a_ = processor(images=__UpperCAmelCase , return_tensors="np")
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2)
    def UpperCAmelCase__ ( self) ->List[Any]:
        # Processor(text=...) must match the bare tokenizer's output.
        a_ = self.get_image_processor()
        a_ = self.get_tokenizer()
        a_ = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase)
        a_ = 'test'
        a_ = processor(text=__UpperCAmelCase)
        a_ = tokenizer(__UpperCAmelCase)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key])
    def UpperCAmelCase__ ( self) ->List[Any]:
        # text+image call yields pixel_values and labels; no input raises.
        a_ = self.get_image_processor()
        a_ = self.get_tokenizer()
        a_ = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase)
        a_ = 'test'
        a_ = self.prepare_image_inputs()
        a_ = processor(text=__UpperCAmelCase , images=__UpperCAmelCase)
        self.assertListEqual(list(inputs.keys()) , ["pixel_values", "labels"])
        # test if it raises when no input is passed
        with pytest.raises(__UpperCAmelCase):
            processor()
    def UpperCAmelCase__ ( self) ->Optional[Any]:
        # char_decode must match tokenizer.batch_decode with spaces stripped.
        a_ = self.get_image_processor()
        a_ = self.get_tokenizer()
        a_ = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase)
        a_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        a_ = processor.char_decode(__UpperCAmelCase)
        a_ = tokenizer.batch_decode(__UpperCAmelCase)
        a_ = [seq.replace(" " , "") for seq in decoded_tok]
        self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase)
    def UpperCAmelCase__ ( self) ->str:
        # Keys of an image-only call must match processor.model_input_names.
        a_ = self.get_image_processor()
        a_ = self.get_tokenizer()
        a_ = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase)
        a_ = None
        a_ = self.prepare_image_inputs()
        a_ = processor(text=__UpperCAmelCase , images=__UpperCAmelCase)
        self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
    def UpperCAmelCase__ ( self) ->Tuple:
        # batch_decode over char/bpe/wp logits returns the documented keys.
        a_ = self.get_image_processor()
        a_ = self.get_tokenizer()
        a_ = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase)
        a_ = torch.randn(1 , 27 , 38)
        a_ = torch.randn(1 , 27 , 5_02_57)
        a_ = torch.randn(1 , 27 , 3_05_22)
        a_ = processor.batch_decode([char_input, bpe_input, wp_input])
        self.assertListEqual(list(results.keys()) , ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"]) | 243 |
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> list:
    """Return every permutation of the list ``SCREAMING_SNAKE_CASE_`` as a
    list of tuples, generated with Heap's algorithm.

    The list is permuted in place while generating; a 0- or 1-element input
    yields a single tuple.

    Fixes: the inner generator previously declared the same name for both of
    its parameters (a SyntaxError), and the element swaps were discarded into
    throwaway locals instead of written back into the list.
    """
    arr = SCREAMING_SNAKE_CASE_
    if len(arr ) <= 1:
        return [tuple(arr )]

    res = []

    def _generate(k , arr ):
        # Heap's recursion: emit a permutation once only one slot is left.
        if k == 1:
            res.append(tuple(arr[:] ) )
            return
        _generate(k - 1 , arr )
        for i in range(k - 1 ):
            # Swap rule: even k swaps position i, odd k swaps position 0.
            if k % 2 == 0:
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            _generate(k - 1 , arr )

    _generate(len(arr ) , arr )
    return res
if __name__ == "__main__":
lowerCamelCase__ = input("""Enter numbers separated by a comma:\n""").strip()
lowerCamelCase__ = [int(item) for item in user_input.split(""",""")]
print(heaps(arr)) | 212 | 0 |
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
UpperCamelCase = logging.get_logger(__name__)
class _lowerCamelCase ( enum.Enum ):
    """Output formats for the generation pipelines below: raw token tensors
    or decoded text.

    Fix: both members were previously named ``snake_case``, which makes Enum
    creation raise at import time (duplicate member name).  Member names are
    restored from the ``ReturnType.TENSORS`` / ``ReturnType.TEXT`` references
    later in this file.
    """

    TENSORS = 0
    TEXT = 1
@add_end_docstrings(UpperCamelCase )
class _lowerCamelCase ( UpperCamelCase ):
    """Base class for seq2seq text-to-text generation pipelines
    (summarization / translation subclasses below).

    NOTE(review): identifiers look machine-mangled — several signatures reuse
    the placeholder `_SCREAMING_SNAKE_CASE` for more than one parameter (a
    SyntaxError as written), and bodies read names (`truncation`,
    `generate_kwargs`, `prefix`, `args`, ...) that the mangled signatures no
    longer bind.  Verify against the un-mangled pipeline implementation
    before relying on behavioral details.
    """
    # Prefix used for the output record keys, e.g. "generated_text".
    snake_case = "generated"
    def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )->Any:
        '''Forward construction to the base pipeline, then restrict the model
        to the seq2seq (encoder-decoder) mapping of the active framework.'''
        super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == '''tf'''
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
    def _snake_case ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , )->Union[str, Any]:
        '''Split pipeline kwargs into (preprocess, forward, postprocess)
        parameter dicts; resolves return type, tokenization cleanup and the
        optional stop sequence (only its first token is honored).'''
        A_ : Dict = {}
        if truncation is not None:
            A_ : Tuple = truncation
        A_ : Optional[Any] = generate_kwargs
        A_ : Tuple = {}
        if return_tensors is not None and return_type is None:
            A_ : List[str] = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            A_ : Optional[int] = return_type
        if clean_up_tokenization_spaces is not None:
            A_ : Optional[Any] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            A_ : Union[str, Any] = self.tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
            if len(_SCREAMING_SNAKE_CASE ) > 1:
                warnings.warn(
                    '''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
                    ''' the stop sequence will be used as the stop sequence string in the interim.''' )
            A_ : List[Any] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->Any:
        '''Hook for subclasses to validate generation lengths; the base class
        accepts everything.'''
        return True
    def _snake_case ( self , *_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->str:
        '''Tokenize the (possibly prefixed) input text or batch of texts.'''
        A_ : str = self.model.config.prefix if self.model.config.prefix is not None else ''''''
        if isinstance(args[0] , _SCREAMING_SNAKE_CASE ):
            if self.tokenizer.pad_token_id is None:
                raise ValueError('''Please make sure that the tokenizer has a pad_token_id when using a batch input''' )
            A_ : Dict = ([prefix + arg for arg in args[0]],)
            A_ : str = True
        elif isinstance(args[0] , _SCREAMING_SNAKE_CASE ):
            A_ : Optional[Any] = (prefix + args[0],)
            A_ : Dict = False
        else:
            raise ValueError(
                F''' `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`''' )
        A_ : Tuple = self.tokenizer(*_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs
    def __call__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )->Dict:
        '''Run the pipeline; unwraps single-candidate result lists when the
        caller passed a list of single inputs.'''
        A_ : Union[str, Any] = super().__call__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
        if (
            isinstance(args[0] , _SCREAMING_SNAKE_CASE )
            and all(isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for el in args[0] )
            and all(len(_SCREAMING_SNAKE_CASE ) == 1 for res in result )
        ):
            return [res[0] for res in result]
        return result
    def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=TruncationStrategy.DO_NOT_TRUNCATE , **_SCREAMING_SNAKE_CASE )->List[Any]:
        '''Preprocess step: tokenize the raw input text.'''
        A_ : Tuple = self._parse_and_tokenize(_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
        return inputs
    def _snake_case ( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )->str:
        '''Forward step: run model.generate and reshape the output ids to
        (batch, candidates, sequence).'''
        if self.framework == "pt":
            A_ : Optional[Any] = model_inputs['''input_ids'''].shape
        elif self.framework == "tf":
            A_ : Union[str, Any] = tf.shape(model_inputs['''input_ids'''] ).numpy()
        A_ : Dict = generate_kwargs.get('''min_length''' , self.model.config.min_length )
        A_ : Dict = generate_kwargs.get('''max_length''' , self.model.config.max_length )
        self.check_inputs(_SCREAMING_SNAKE_CASE , generate_kwargs['''min_length'''] , generate_kwargs['''max_length'''] )
        A_ : Union[str, Any] = self.model.generate(**_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
        A_ : Optional[int] = output_ids.shape[0]
        if self.framework == "pt":
            A_ : str = output_ids.reshape(_SCREAMING_SNAKE_CASE , out_b // in_b , *output_ids.shape[1:] )
        elif self.framework == "tf":
            A_ : Any = tf.reshape(_SCREAMING_SNAKE_CASE , (in_b, out_b // in_b, *output_ids.shape[1:]) )
        return {"output_ids": output_ids}
    def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=ReturnType.TEXT , _SCREAMING_SNAKE_CASE=False )->Union[str, Any]:
        '''Postprocess step: decode ids into text records, or pass the raw
        token ids through when tensors were requested.'''
        A_ : Optional[int] = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                A_ : str = {F'''{self.return_name}_token_ids''': output_ids}
            elif return_type == ReturnType.TEXT:
                A_ : List[Any] = {
                F'''{self.return_name}_text''': self.tokenizer.decode(
                _SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE , )
                }
            records.append(_SCREAMING_SNAKE_CASE )
        return records
@add_end_docstrings(UpperCamelCase )
class _lowerCamelCase ( UpperCamelCase ):
    """Summarization pipeline variant; output records are keyed by
    ``summary_*`` via the ``snake_case`` return-name attribute.

    NOTE(review): like the base class above, the mangled ``_snake_case``
    signature reuses one parameter name while the body reads ``max_length``
    / ``min_length`` / ``input_length``; verify against the un-mangled
    implementation.
    """
    snake_case = "summary"
    def __call__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )->Optional[int]:
        '''Delegate to the base pipeline __call__.'''
        return super().__call__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
    def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->bool:
        '''Warn about suspicious min/max generation lengths for a
        summarization task (outputs are expected to be shorter than inputs).'''
        if max_length < min_length:
            logger.warning(F'''Your min_length={min_length} must be inferior than your max_length={max_length}.''' )
        if input_length < max_length:
            logger.warning(
                F'''Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '''
                '''a summarization task, where outputs shorter than the input are typically wanted, you might '''
                F'''consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})''' )
@add_end_docstrings(UpperCamelCase )
class _lowerCamelCase ( UpperCamelCase ):
    """Translation pipeline variant; output records are keyed by
    ``translation_*`` and src/tgt languages may come from the task name.

    NOTE(review): mangled signatures below reuse one placeholder parameter
    name while bodies read ``input_length`` / ``max_length`` / ``src_lang``
    / ``tgt_lang`` / ``kwargs``; verify against the un-mangled
    implementation.
    """
    snake_case = "translation"
    def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->Union[str, Any]:
        '''Warn when the input is close to max_length (translations are
        usually about as long as their source); always accepts the inputs.'''
        if input_length > 0.9 * max_length:
            logger.warning(
                F'''Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '''
                '''increasing your max_length manually, e.g. translator(\'...\', max_length=400)''' )
        return True
    def _snake_case ( self , *_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=TruncationStrategy.DO_NOT_TRUNCATE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None )->Tuple:
        '''Tokenize via the tokenizer's translation-specific builder when it
        exists, otherwise fall back to the generic parse-and-tokenize.'''
        if getattr(self.tokenizer , '''_build_translation_inputs''' , _SCREAMING_SNAKE_CASE ):
            return self.tokenizer._build_translation_inputs(
                *_SCREAMING_SNAKE_CASE , return_tensors=self.framework , truncation=_SCREAMING_SNAKE_CASE , src_lang=_SCREAMING_SNAKE_CASE , tgt_lang=_SCREAMING_SNAKE_CASE )
        else:
            return super()._parse_and_tokenize(*_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE )
    def _snake_case ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE )->List[Any]:
        '''Extend parameter sanitization with src/tgt languages, falling back
        to parsing them out of a "translation_XX_to_YY" task name.'''
        A_ : int = super()._sanitize_parameters(**_SCREAMING_SNAKE_CASE )
        if src_lang is not None:
            A_ : Optional[Any] = src_lang
        if tgt_lang is not None:
            A_ : Dict = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            A_ : str = kwargs.get('''task''' , self.task )
            A_ : int = task.split('''_''' )
            if task and len(_SCREAMING_SNAKE_CASE ) == 4:
                # translation, XX, to YY
                A_ : Optional[int] = items[1]
                A_ : int = items[3]
        return preprocess_params, forward_params, postprocess_params
    def __call__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )->Any:
        '''Delegate to the base pipeline __call__.'''
        return super().__call__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
| 352 |
def _SCREAMING_SNAKE_CASE ( equation1 , equation2 ):
    """Solve a pair of two-variable linear equations with Cramer's rule.

    Each equation is a sequence ``[a, b, c]`` representing ``a*x + b*y = c``.
    Returns the solution ``(x, y)`` as floats.

    Raises:
        ValueError: if an equation does not have exactly three coefficients,
            if both x/y coefficients of both equations are zero, or if the
            system has no solution / infinitely many solutions.

    Fix: the previous signature declared the same name for both parameters
    (a SyntaxError), read both equations from a single name, and computed
    determinants from self-cancelling expressions (e.g. ``aa*ba - aa*ba``).
    """
    # Check if the input is valid
    if not len(equation1 ) == len(equation2 ) == 3:
        raise ValueError('''Please enter a valid equation.''' )
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError('''Both a & b of two equations can\'t be zero.''' )
    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2
    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1
    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError('''Infinite solutions. (Consistent system)''' )
        raise ValueError('''No solution. (Inconsistent system)''' )
    if determinant_x == determinant_y == 0:
        # Trivial solution (both right-hand sides are zero).
        return (0.0, 0.0)
    # Non-Trivial Solution (Consistent system)
    return (determinant_x / determinant, determinant_y / determinant)
| 65 | 0 |
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def __lowerCamelCase ( name , val , spaces=0 ) -> str:
    """Pretty-print a (possibly nested) Megatron checkpoint structure.

    Dicts recurse with increasing indentation, torch tensors print their
    size, and everything else prints its value.  Returns None (prints only).

    Fix: the previous signature declared the same name ``__a`` for all three
    parameters (a SyntaxError) and recursed through the undefined name
    ``recursive_print``.  Recursion now goes through a nested helper so it
    does not depend on this function's module-level name.
    """
    def _print_node(name , val , spaces ):
        # Format the label column for this nesting level, if any.
        if name is None:
            msg = None
        else:
            fmt = """.""" * max(0 , spaces - 2 ) + """# {:""" + str(5_0 - spaces ) + """s}"""
            msg = fmt.format(name )
        # Print and recurse (if needed).
        if isinstance(val , dict ):
            if msg is not None:
                print(msg )
            for k in val.keys():
                _print_node(k , val[k] , spaces + 2 )
        elif isinstance(val , torch.Tensor ):
            print(msg , """:""" , val.size() )
        else:
            print(msg , """:""" , val )

    _print_node(name , val , spaces )
def __lowerCamelCase ( param , checkpoint_version , num_splits , num_heads , hidden_size ) -> List[Any]:
    """Reorder a fused query/key/value parameter saved by Megatron-LM into
    the layout expected downstream, based on the checkpoint version.

    Returns the permuted tensor reshaped back to its original shape.

    Fix: the previous signature declared the same name ``__a`` for all five
    parameters (a SyntaxError) and the body read names the signature never
    bound; restored from the expression structure that survived the
    mangling.
    """
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape )
        param = param.transpose(0 , 2 )
        param = param.transpose(1 , 2 ).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape )
        param = param.transpose(0 , 1 ).contiguous()
    # Restore the original (flattened) shape.
    param = param.view(*input_shape )
    return param
def convert_megatron_checkpoint(args, input_state_dict, config):
    """Convert a Megatron-LM GPT-2 checkpoint into a transformers GPT-2 state dict.

    Args:
        args: parsed CLI namespace (kept for interface compatibility; unused here).
        input_state_dict: the raw checkpoint as loaded with ``torch.load``.
        config: a GPT-2 config; its dimensions are updated in place when the
            checkpoint carries its own training args.

    Returns:
        dict: a state dict loadable by ``GPT2LMHeadModel``.
    """
    # The converted output model.
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size

    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]

    # The word embeddings, truncated to vocab_size rows.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings

    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings

    # The transformer (key name differs between Megatron versions).
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]

    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")

    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }

    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)
        # Stop if that's not a layer
        if m is None:
            break
        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)
        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val
        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask
            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val
        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val
        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)
        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val

    # DEBUG: every layer must have been converted.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]

    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings

    # It should be done!
    return output_state_dict
def main():
    """CLI entry point: convert a Megatron-LM GPT-2 checkpoint to a transformers one.

    Loads the checkpoint (optionally from a .zip archive), converts it, then
    writes config, tokenizer files and ``pytorch_model.bin`` next to the input.
    """
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint",
        type=str,
        help="Path to the checkpoint file (.zip archive or direct .pt file)",
    )
    parser.add_argument(
        "--config_file",
        default="",
        type=str,
        help="An optional config json file describing the pre-trained model.",
    )
    args = parser.parse_args()

    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f'Extracting PyTorch state dictionary from {args.path_to_checkpoint}')
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")

    ds_args = input_state_dict.get("args", None)

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"

        # Spell out all parameters in case the defaults change.
        config = GPTaConfig(
            vocab_size=50257,
            n_positions=1024,
            n_embd=1024,
            n_layer=24,
            n_head=16,
            n_inner=4096,
            activation_function=activation_function,
            resid_pdrop=0.1,
            embd_pdrop=0.1,
            attn_pdrop=0.1,
            layer_norm_epsilon=1e-5,
            initializer_range=0.02,
            summary_type="cls_index",
            summary_use_proj=True,
            summary_activation=None,
            summary_proj_to_labels=True,
            summary_first_dropout=0.1,
            scale_attn_weights=True,
            use_cache=True,
            bos_token_id=50256,
            eos_token_id=50256,
        )
    else:
        config = GPTaConfig.from_json_file(args.config_file)

    config.architectures = ["GPT2LMHeadModel"]

    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f'Unrecognized tokenizer_type {tokenizer_type}')
    else:
        tokenizer_model_name = "gpt2"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)

    # Save tokenizer based on args
    print(f'Adding {tokenizer_class} tokenizer files')
    tokenizer.save_pretrained(basename)

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)


####################################################################################################

if __name__ == "__main__":
    main()

####################################################################################################
| 274 |
import argparse
from collections import defaultdict
import yaml
A : str = '''docs/source/en/_toctree.yml'''
def __lowerCamelCase ( __a :str ) -> List[Any]:
"""simple docstring"""
A__ = defaultdict(__a )
A__ = []
A__ = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({"""local""": doc["""local"""], """title""": doc["""title"""]} )
else:
new_doc_list.append(__a )
A__ = new_doc_list
A__ = [key for key, value in counts.items() if value > 1]
A__ = []
for duplicate_key in duplicates:
A__ = list({doc["""title"""] for doc in doc_list if doc["""local"""] == duplicate_key} )
if len(__a ) > 1:
raise ValueError(
F'{duplicate_key} is present several times in the documentation table of content at '
"""`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """
"""others.""" )
# Only add this once
new_doc.append({"""local""": duplicate_key, """title""": titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if """local""" not in counts or counts[doc["""local"""]] == 1] )
A__ = sorted(__a , key=lambda __a : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(__a ) > 1:
raise ValueError("""{doc_list} has two 'overview' docs which is not allowed.""" )
overview_doc.extend(__a )
# Sort
return overview_doc
def check_scheduler_doc(overwrite=False):
    """Check that the "Schedulers" section of the ToC is clean and sorted.

    Args:
        overwrite: when True, rewrite the ToC file in place instead of raising.

    Raises:
        ValueError: when the section is out of order and ``overwrite`` is False.
    """
    # Path of the documentation table of content (kept local so this function is self-contained).
    toc_path = "docs/source/en/_toctree.yml"
    with open(toc_path, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1
    scheduler_doc = api_doc[scheduler_idx]["sections"]

    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(toc_path, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
def check_pipeline_doc(overwrite=False):
    """Check that the "Pipelines" section of the ToC (and its sub-sections) is clean and sorted.

    Args:
        overwrite: when True, rewrite the ToC file in place instead of raising.

    Raises:
        ValueError: when the section is out of order and ``overwrite`` is False.
    """
    # Path of the documentation table of content (kept local so this function is self-contained).
    toc_path = "docs/source/en/_toctree.yml"
    with open(toc_path, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(toc_path, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
| 274 | 1 |
from typing import Any
def __UpperCamelCase ( _A ):
if not input_list:
return []
lowerCAmelCase_ = [input_list.count(_A ) for value in input_list]
lowerCAmelCase_ = max(_A ) # Gets the maximum count in the input list.
# Gets values of modes
return sorted({input_list[i] for i, value in enumerate(_A ) if value == y} )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 167 |
def is_ip_va_address_valid(ip_va_address: str) -> bool:
    """Return True when *ip_va_address* is a syntactically valid dotted-quad IPv4 address.

    >>> is_ip_va_address_valid("192.168.0.23")
    True
    >>> is_ip_va_address_valid("192.256.15.8")
    False
    """
    octets = [int(i) for i in ip_va_address.split(".") if i.isdigit()]
    # Four numeric octets, each in 0..255 (255 is valid, e.g. the broadcast address).
    return len(octets) == 4 and all(0 <= octet <= 255 for octet in octets)


if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_va_address_valid(ip) else "invalid"
    print(f"{ip} is a {valid_or_invalid} IP v4 address.")
| 167 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger.
logger = logging.get_logger(__name__)

# Map of checkpoint name -> hosted config URL.
SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    """Configuration class for a Speech2Text2 decoder.

    Instantiating a configuration with the defaults yields a configuration
    similar to that of facebook/s2t-wav2vec2-large-en-de.
    """

    model_type = "speech_to_text_2"
    # Keys that should not be returned at inference time.
    keys_to_ignore_at_inference = ["past_key_values"]
    # Map generic config attribute names onto this model's names.
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 177 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('>=', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
UpperCAmelCase_ = get_logger(__name__)
def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    """Save an FSDP-wrapped model's state dict to `output_dir`.

    The on-disk layout depends on ``fsdp_plugin.state_dict_type``:
    FULL (single rank-0 file), LOCAL (one file per rank) or SHARDED
    (a distributed-checkpoint directory).
    """
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            # Full state dict is gathered on rank 0 only; only rank 0 writes it.
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")
def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    """Load an FSDP model state dict from `input_dir` into `model`.

    Mirrors :func:`save_fsdp_model`: the expected on-disk layout depends on
    ``fsdp_plugin.state_dict_type``.
    """
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            # With a full state dict only rank 0 loads the file; the other ranks
            # rely on `sync_module_states` to receive the weights.
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
    model.load_state_dict(state_dict)
def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    """Save an FSDP optimizer state dict to `output_dir`.

    FULL state dicts are written by rank 0 as a single file; all other modes
    use a sharded distributed-checkpoint directory.
    """
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")
def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    """Load an FSDP optimizer state dict from `input_dir` into `optimizer`.

    Mirrors :func:`save_fsdp_optimizer` for both the FULL single-file layout
    and the sharded distributed-checkpoint directory layout.
    """
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly opytorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
    flattened_osd = FSDP.optim_state_dict_to_load(model, optimizer, optim_state)
    optimizer.load_state_dict(flattened_osd)
| 12 | 0 |
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
# Module-level logger.
logger = logging.get_logger(__name__)

# Map of model type -> image processor class name. The code below (and the
# sibling helpers in this module) looks these up by name.
IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
    [
        ("align", "EfficientNetImageProcessor"),
        ("beit", "BeitImageProcessor"),
        ("bit", "BitImageProcessor"),
        ("blip", "BlipImageProcessor"),
        ("blip-2", "BlipImageProcessor"),
        ("bridgetower", "BridgeTowerImageProcessor"),
        ("chinese_clip", "ChineseCLIPImageProcessor"),
        ("clip", "CLIPImageProcessor"),
        ("clipseg", "ViTImageProcessor"),
        ("conditional_detr", "ConditionalDetrImageProcessor"),
        ("convnext", "ConvNextImageProcessor"),
        ("convnextv2", "ConvNextImageProcessor"),
        ("cvt", "ConvNextImageProcessor"),
        ("data2vec-vision", "BeitImageProcessor"),
        ("deformable_detr", "DeformableDetrImageProcessor"),
        ("deit", "DeiTImageProcessor"),
        ("deta", "DetaImageProcessor"),
        ("detr", "DetrImageProcessor"),
        ("dinat", "ViTImageProcessor"),
        ("donut-swin", "DonutImageProcessor"),
        ("dpt", "DPTImageProcessor"),
        ("efficientformer", "EfficientFormerImageProcessor"),
        ("efficientnet", "EfficientNetImageProcessor"),
        ("flava", "FlavaImageProcessor"),
        ("focalnet", "BitImageProcessor"),
        ("git", "CLIPImageProcessor"),
        ("glpn", "GLPNImageProcessor"),
        ("groupvit", "CLIPImageProcessor"),
        ("imagegpt", "ImageGPTImageProcessor"),
        ("instructblip", "BlipImageProcessor"),
        ("layoutlmv2", "LayoutLMv2ImageProcessor"),
        ("layoutlmv3", "LayoutLMv3ImageProcessor"),
        ("levit", "LevitImageProcessor"),
        ("mask2former", "Mask2FormerImageProcessor"),
        ("maskformer", "MaskFormerImageProcessor"),
        ("mgp-str", "ViTImageProcessor"),
        ("mobilenet_v1", "MobileNetV1ImageProcessor"),
        ("mobilenet_v2", "MobileNetV2ImageProcessor"),
        ("mobilevit", "MobileViTImageProcessor"),
        ("mobilevitv2", "MobileViTImageProcessor"),
        ("nat", "ViTImageProcessor"),
        ("oneformer", "OneFormerImageProcessor"),
        ("owlvit", "OwlViTImageProcessor"),
        ("perceiver", "PerceiverImageProcessor"),
        ("pix2struct", "Pix2StructImageProcessor"),
        ("poolformer", "PoolFormerImageProcessor"),
        ("regnet", "ConvNextImageProcessor"),
        ("resnet", "ConvNextImageProcessor"),
        ("sam", "SamImageProcessor"),
        ("segformer", "SegformerImageProcessor"),
        ("swiftformer", "ViTImageProcessor"),
        ("swin", "ViTImageProcessor"),
        ("swin2sr", "Swin2SRImageProcessor"),
        ("swinv2", "ViTImageProcessor"),
        ("table-transformer", "DetrImageProcessor"),
        ("timesformer", "VideoMAEImageProcessor"),
        ("tvlt", "TvltImageProcessor"),
        ("upernet", "SegformerImageProcessor"),
        ("van", "ConvNextImageProcessor"),
        ("videomae", "VideoMAEImageProcessor"),
        ("vilt", "ViltImageProcessor"),
        ("vit", "ViTImageProcessor"),
        ("vit_hybrid", "ViTHybridImageProcessor"),
        ("vit_mae", "ViTImageProcessor"),
        ("vit_msn", "ViTImageProcessor"),
        ("xclip", "CLIPImageProcessor"),
        ("yolos", "YolosImageProcessor"),
    ]
)

# Lazily resolves config classes -> image processor classes.
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    """Resolve an image processor class from its class name.

    Looks first in the known transformers model modules, then in any extra
    (user-registered) content, and finally in the top-level ``transformers``
    module so that missing-dependency dummies raise a helpful error.

    Returns:
        The class, or ``None`` when nothing matches.
    """
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_image_processor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    """Load the image processor configuration (as a dict) for a model.

    Downloads/caches the ``preprocessor_config.json`` of the model and parses
    it; returns an empty dict when the file cannot be located.
    """
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoImageProcessor:
    """Factory that instantiates the right image processor for a checkpoint.

    This class cannot be instantiated directly; use
    :meth:`AutoImageProcessor.from_pretrained`.
    """

    def __init__(self):
        raise EnvironmentError(
            'AutoImageProcessor is designed to be instantiated '
            'using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.'
        )

    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Instantiate the image processor class matching the checkpoint's config."""
        config = kwargs.pop('config', None)
        trust_remote_code = kwargs.pop('trust_remote_code', None)
        kwargs['_from_auto'] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get('image_processor_type', None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get('auto_map', {}):
            image_processor_auto_map = config_dict['auto_map']['AutoImageProcessor']

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop('feature_extractor_type', None)
            if feature_extractor_class is not None:
                logger.warning(
                    'Could not find image processor class in the image processor config or the model config. Loading'
                    ' based on pattern matching with the model\'s feature extractor configuration.'
                )
                image_processor_class = feature_extractor_class.replace('FeatureExtractor', 'ImageProcessor')
            if "AutoFeatureExtractor" in config_dict.get('auto_map', {}):
                feature_extractor_auto_map = config_dict['auto_map']['AutoFeatureExtractor']
                image_processor_auto_map = feature_extractor_auto_map.replace('FeatureExtractor', 'ImageProcessor')
                logger.warning(
                    'Could not find image processor auto map in the image processor config or the model config.'
                    ' Loading based on pattern matching with the model\'s feature extractor configuration.'
                )

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type``
            image_processor_class = getattr(config, 'image_processor_type', None)
            if hasattr(config, 'auto_map') and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map['AutoImageProcessor']

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop('code_revision', None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f'Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '
            f'`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '
            f'`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}'
        )

    @staticmethod
    def register(config_class, image_processor_class):
        """Register a new (config class, image processor class) pair for this factory."""
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
'''simple docstring'''
def jaro_winkler(stra: str, strb: str) -> float:
    """Compute the Jaro-Winkler similarity between two strings.

    Returns a value in [0, 1]; 1.0 for identical strings, 0.0 for strings
    with no matching characters.

    >>> jaro_winkler("hello", "hello")
    1.0
    """

    def get_matched_characters(_stra: str, _strb: str) -> str:
        # Characters of _stra that also occur in _strb within the Jaro window.
        matched = []
        limit = min(len(_stra), len(_strb)) // 2
        for i, l in enumerate(_stra):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_strb)))
            if l in _strb[left:right]:
                matched.append(l)
                # Blank out the matched character so it cannot be matched twice.
                _strb = f"{_strb[0:_strb.index(l)]} {_strb[_strb.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_a = get_matched_characters(stra, strb)
    matching_b = get_matched_characters(strb, stra)
    match_count = len(matching_a)

    # transposition
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_a, matching_b) if ca != cb]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(stra)
                + match_count / len(strb)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(stra[:4], strb[:4]):
        if ca == cb:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler("hello", "world"))
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class a__ ( unittest.TestCase ):
    # Test suite for ``BarkProcessor`` (tokenizer wrapping + voice presets).
    #
    # NOTE(review): every method below shares the obfuscated name
    # ``SCREAMING_SNAKE_CASE__``, so each ``def`` shadows the previous one
    # and only the last definition survives on the class — the earlier
    # bodies (setup, teardown, save/load and speaker-embedding tests) are
    # unreachable as written.  The first method also binds its fixture
    # values to a local instead of ``self`` attributes, yet other methods
    # read ``self.checkpoint`` / ``self.tmpdirname`` etc.  Confirm the
    # intended names (``setUp``, ``tearDown``, ``get_tokenizer``,
    # ``test_*``) against the upstream test file before relying on this.

    def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
        """Fixture values: checkpoint id, temp dir, sample text, preset names."""
        __lowerCamelCase = '''ylacombe/bark-small'''
        __lowerCamelCase = tempfile.mkdtemp()
        __lowerCamelCase = '''en_speaker_1'''
        __lowerCamelCase = '''This is a test string'''
        __lowerCamelCase = '''speaker_embeddings_path.json'''
        __lowerCamelCase = '''speaker_embeddings'''

    def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , **a : Dict ):
        """Load the tokenizer for ``self.checkpoint``, forwarding extra kwargs."""
        return AutoTokenizer.from_pretrained(self.checkpoint , **a )

    def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
        """Remove the temporary directory created by the fixture."""
        shutil.rmtree(self.tmpdirname )

    def SCREAMING_SNAKE_CASE__ ( self : str ):
        """Saving then re-loading a default processor preserves the vocab."""
        __lowerCamelCase = self.get_tokenizer()
        __lowerCamelCase = BarkProcessor(tokenizer=a )
        processor.save_pretrained(self.tmpdirname )
        __lowerCamelCase = BarkProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )

    @slow
    def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
        """Save/load round-trip with speaker embeddings and extra tokenizer kwargs."""
        __lowerCamelCase = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        processor.save_pretrained(
            self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
        __lowerCamelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        __lowerCamelCase = BarkProcessor.from_pretrained(
            self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )

    def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
        """Voice presets are accepted as a dict, an .npz file path, or a hub name."""
        __lowerCamelCase = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        __lowerCamelCase = 35
        __lowerCamelCase = 2
        __lowerCamelCase = 8
        __lowerCamelCase = {
            '''semantic_prompt''': np.ones(a ),
            '''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len) ),
            '''fine_prompt''': np.ones((nb_codebooks_total, seq_len) ),
        }
        # test providing already loaded voice_preset
        __lowerCamelCase = processor(text=self.input_string , voice_preset=a )
        __lowerCamelCase = inputs['''history_prompt''']
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(a , np.array([] ) ).tolist() )
        # test loading voice preset from npz file
        __lowerCamelCase = os.path.join(self.tmpdirname , '''file.npz''' )
        np.savez(a , **a )
        __lowerCamelCase = processor(text=self.input_string , voice_preset=a )
        __lowerCamelCase = inputs['''history_prompt''']
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(a , np.array([] ) ).tolist() )
        # test loading voice preset from the hub
        __lowerCamelCase = processor(text=self.input_string , voice_preset=self.voice_preset )

    def SCREAMING_SNAKE_CASE__ ( self : int ):
        """Processor output must match a direct tokenizer call (padded to 256)."""
        __lowerCamelCase = self.get_tokenizer()
        __lowerCamelCase = BarkProcessor(tokenizer=a )
        __lowerCamelCase = processor(text=self.input_string )
        __lowerCamelCase = tokenizer(
            self.input_string , padding='''max_length''' , max_length=2_56 , add_special_tokens=a , return_attention_mask=a , return_token_type_ids=a , )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 67 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_fnet import FNetTokenizer
else:
    # Slow tokenizer is unavailable without sentencepiece.
    FNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/fnet-base": 512,
    "google/fnet-large": 512,
}

SPIECE_UNDERLINE = "▁"


class lowercase(PreTrainedTokenizerFast):
    """Fast FNet tokenizer, backed by a ``tokenizers`` model converted from
    the SentencePiece vocabulary."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        # The slow tokenizer can only be re-created when the SentencePiece
        # vocab file is available.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs: ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Token-type ids: 0 for ``[CLS] A [SEP]``, 1 for ``B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """Copy the SentencePiece vocab file into *save_directory*."""
        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 26 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_xlnet import XLNetTokenizer
else:
    # NOTE(review): every module-level constant below is bound to the
    # obfuscated name ``__UpperCAmelCase`` (re-assigned repeatedly), yet the
    # class body reads ``VOCAB_FILES_NAMES``, ``PRETRAINED_VOCAB_FILES_MAP``
    # and ``PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`` — those names are never
    # defined, so creating the class raises NameError.  Confirm the intended
    # constant names before use.
    __UpperCAmelCase : Any = None

__UpperCAmelCase : Tuple = logging.get_logger(__name__)
# vocab/tokenizer file names
__UpperCAmelCase : str = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
# hub URLs for the pretrained vocab and tokenizer files
__UpperCAmelCase : Optional[int] = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}
# no fixed max input sizes for XLNet (relative attention)
__UpperCAmelCase : Optional[int] = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

__UpperCAmelCase : Any = "▁"

# Segments (not really needed)
__UpperCAmelCase : Optional[Any] = 0
__UpperCAmelCase : int = 1
__UpperCAmelCase : Any = 2
__UpperCAmelCase : Optional[Any] = 3
__UpperCAmelCase : Union[str, Any] = 4


class __snake_case ( __lowerCamelCase ):
    """Fast XLNet tokenizer (left-padded, SentencePiece-based).

    NOTE(review): the ``__init__`` signature repeats the parameter name ``A``
    for every argument, which is a SyntaxError in Python; the three methods
    below all share the name ``UpperCAmelCase__`` so only the last survives
    on the class; and ``can_save_slow_tokenizer`` is read in the last method
    but never assigned to ``self``.  The original method names are presumably
    ``build_inputs_with_special_tokens``, ``create_token_type_ids_from_sequences``
    and ``save_vocabulary`` — confirm before repairing.
    """
    lowerCAmelCase__ = VOCAB_FILES_NAMES
    lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
    lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowerCAmelCase__ = """left"""
    lowerCAmelCase__ = XLNetTokenizer

    def __init__( self : Tuple , A : Dict=None , A : Union[str, Any]=None , A : Union[str, Any]=False , A : Any=True , A : Tuple=False , A : int="<s>" , A : str="</s>" , A : int="<unk>" , A : Optional[Any]="<sep>" , A : Dict="<pad>" , A : Tuple="<cls>" , A : List[Any]="<mask>" , A : Any=["<eop>", "<eod>"] , **A : Optional[int] , ):
        # Mask token behave like a normal word, i.e. include the space before it
        __snake_case: List[Any] = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token

        super().__init__(
            vocab_file=A , tokenizer_file=A , do_lower_case=A , remove_space=A , keep_accents=A , bos_token=A , eos_token=A , unk_token=A , sep_token=A , pad_token=A , cls_token=A , mask_token=A , additional_special_tokens=A , **A , )

        # XLNet uses segment id 2 for padding
        __snake_case: str = 3
        __snake_case: int = do_lower_case
        __snake_case: Tuple = remove_space
        __snake_case: Union[str, Any] = keep_accents
        __snake_case: Optional[Any] = vocab_file
        __snake_case: Union[str, Any] = False if not self.vocab_file else True

    # Builds `A <sep> <cls>` or `A <sep> B <sep> <cls>` (XLNet puts CLS last).
    def UpperCAmelCase__ ( self : Dict , A : List[int] , A : Optional[List[int]] = None ):
        __snake_case: Optional[int] = [self.sep_token_id]
        __snake_case: Dict = [self.cls_token_id]
        if token_ids_a is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_a + sep + cls

    # Token-type ids: 0 for the first segment, 1 for the second, 2 for CLS.
    def UpperCAmelCase__ ( self : Optional[Any] , A : List[int] , A : Optional[List[int]] = None ):
        __snake_case: Tuple = [self.sep_token_id]
        __snake_case: Optional[int] = [2]

        if token_ids_a is None:
            return len(token_ids_a + sep ) * [0] + cls_segment_id
        return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id

    # Copies the SentencePiece vocab file into the save directory.
    def UpperCAmelCase__ ( self : Any , A : str , A : Optional[str] = None ):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""" )

        if not os.path.isdir(A ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        __snake_case: Any = os.path.join(
            A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(A ):
            copyfile(self.vocab_file , A )

        return (out_vocab_file,)
| 293 |
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class __snake_case ( __lowerCamelCase ):
    """Question-answering Trainer subclass with post-processing of predictions.

    NOTE(review): this block is broken by obfuscation and cannot run as-is:
    ``__init__`` repeats the parameter name ``A`` (a SyntaxError); both public
    methods share the name ``UpperCAmelCase__`` (presumably ``evaluate`` and
    ``predict``), so the first is shadowed; and results are bound to the
    obfuscated local ``__snake_case`` while later lines read never-assigned
    names (``output``, ``total_batch_size``, ``metrics``, ``predictions``,
    ``compute_metrics``, ``eval_dataloader`` …), which would raise NameError.
    Confirm against the upstream ``trainer_qa.py`` before repairing.
    """

    def __init__( self : str , *A : Dict , A : Optional[int]=None , A : Tuple=None , **A : Optional[int] ):
        """Store the eval examples and the QA post-processing callback."""
        super().__init__(*A , **A )
        __snake_case: List[Any] = eval_examples
        __snake_case: str = post_process_function

    # Evaluation: run the prediction loop with metrics disabled, then
    # post-process predictions and compute metrics on the main node.
    def UpperCAmelCase__ ( self : List[Any] , A : Dict=None , A : int=None , A : List[Any]=None , A : str = "eval" ):
        __snake_case: int = self.eval_dataset if eval_dataset is None else eval_dataset
        __snake_case: Any = self.get_eval_dataloader(A )
        __snake_case: Optional[Any] = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        __snake_case: Union[str, Any] = self.compute_metrics
        __snake_case: List[str] = None
        __snake_case: Tuple = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        __snake_case: Tuple = time.time()

        try:
            __snake_case: Any = eval_loop(
                A , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A , metric_key_prefix=A , )
        finally:
            # Restore the metric callback even if the loop raised.
            __snake_case: Optional[int] = compute_metrics
        __snake_case: Union[str, Any] = self.args.eval_batch_size * self.args.world_size
        if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
            start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time''']
        output.metrics.update(
            speed_metrics(
                A , A , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            __snake_case: List[str] = self.post_process_function(A , A , output.predictions )
            __snake_case: List[Any] = self.compute_metrics(A )

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(f'''{metric_key_prefix}_''' ):
                    __snake_case: str = metrics.pop(A )
            metrics.update(output.metrics )
        else:
            __snake_case: List[Any] = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(A )

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )

        __snake_case: str = self.callback_handler.on_evaluate(self.args , self.state , self.control , A )
        return metrics

    # Prediction: same loop as evaluation, then wrap the post-processed
    # predictions and metrics in a PredictionOutput.
    def UpperCAmelCase__ ( self : Optional[Any] , A : List[Any] , A : List[str] , A : str=None , A : str = "test" ):
        __snake_case: Optional[Any] = self.get_test_dataloader(A )

        # Temporarily disable metric computation, we will do it in the loop here.
        __snake_case: Optional[int] = self.compute_metrics
        __snake_case: List[Any] = None
        __snake_case: str = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        __snake_case: Dict = time.time()

        try:
            __snake_case: str = eval_loop(
                A , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A , metric_key_prefix=A , )
        finally:
            __snake_case: List[Any] = compute_metrics
        __snake_case: Dict = self.args.eval_batch_size * self.args.world_size
        if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
            start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time''']
        output.metrics.update(
            speed_metrics(
                A , A , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        __snake_case: Union[str, Any] = self.post_process_function(A , A , output.predictions , """predict""" )
        __snake_case: str = self.compute_metrics(A )

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(f'''{metric_key_prefix}_''' ):
                __snake_case: List[str] = metrics.pop(A )
        metrics.update(output.metrics )
        return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=A )
| 293 | 1 |
'''simple docstring'''
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def snake_case_():
    """Entry point for the ``transformers-cli`` tool.

    Builds the argument parser, registers every sub-command, parses
    ``sys.argv`` and dispatches to the selected command's ``run`` method.
    Prints help and exits with status 1 when no sub-command is given.
    """
    parser = ArgumentParser('''Transformers CLI tool''' , usage='''transformers-cli <command> [<args>]''' )
    commands_parser = parser.add_subparsers(help='''transformers-cli command helpers''' )

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args , '''func''' ):
        parser.print_help()
        exit(1 )

    # Run the selected service
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    snake_case_()
| 93 |
'''simple docstring'''
def UpperCAmelCase_(__lowerCamelCase: int = 100) -> int:
    """Project Euler #6: difference between the square of the sum and the
    sum of the squares of the first *n* natural numbers.

    Uses the closed forms sum(i^2) = n(n+1)(2n+1)/6 and
    (sum(i))^2 = (n(n+1)/2)^2, so it runs in O(1).

    >>> UpperCAmelCase_(10)
    2640
    """
    sum_of_squares = __lowerCamelCase * (__lowerCamelCase + 1) * (2 * __lowerCamelCase + 1) / 6
    square_of_sum = (__lowerCamelCase * (__lowerCamelCase + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)


if __name__ == "__main__":
    print(F'''{UpperCAmelCase_() = }''')
| 223 | 0 |
class RadixNode:
    """A node of a radix (compressed prefix) tree over strings."""

    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Outgoing edges, keyed by the first character of the child's prefix.
        self.nodes: dict[str, RadixNode] = {}

        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word: str) -> tuple[str, str, str]:
        """Return (common prefix, remaining node prefix, remaining word)."""
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break

            x += 1

        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list[str]) -> None:
        """Insert every word of *words* into the tree."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert *word* into the subtree rooted at this node."""
        # Case 1: The word equals this node's prefix: mark it as a leaf.
        if self.prefix == word:
            self.is_leaf = True

        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)

        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word)

            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)

            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix

                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node

                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        """Return True if *word* is stored in the tree."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        """Remove *word* from the tree; return True if it was present."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes

                    return True

    def print_tree(self, height: int = 0) -> None:
        """Pretty-print the subtree, one dash per depth level."""
        if self.prefix != "":
            print('''-''' * height, self.prefix, ''' (leaf)''' if self.is_leaf else '''''')

        for value in self.nodes.values():
            value.print_tree(height + 1)


def test_trie() -> bool:
    """Exercise insert/find/delete on a small word list."""
    words = '''banana bananas bandana band apple all beast'''.split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find('''bandanas''')
    assert not root.find('''apps''')
    root.delete('''all''')
    assert not root.find('''all''')
    root.delete('''banana''')
    assert not root.find('''banana''')
    assert root.find('''bananas''')

    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    """Demonstrate the radix tree on a small word list."""
    root = RadixNode()
    words = '''banana bananas bandanas bandana band apple all beast'''.split()
    root.insert_many(words)

    print('''Words:''', words)
    print('''Tree:''')
    root.print_tree()


if __name__ == "__main__":
    main()
| 51 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class __magic_name__ ( TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    """Formats Arrow tables as (nested) ``torch.Tensor`` objects."""

    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        # Extra kwargs forwarded to every ``torch.tensor(...)`` call.
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        """Stack a list of same-shape, same-dtype tensors into one tensor."""
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        """Convert a leaf value to a tensor, picking a sensible default dtype."""
        import torch

        # Strings/bytes/None cannot become tensors; pass them through.
        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {'''dtype''': torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {'''dtype''': torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # Caller-supplied kwargs override the inferred default dtype.
        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, '''__array__''') and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        """Tensorize every leaf of a nested dict/list structure."""
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        """Extract, decode and tensorize a single row."""
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        """Extract, decode, tensorize and stack a single column."""
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        """Extract, decode and tensorize a batch, stacking each column."""
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
| 51 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__lowerCAmelCase = logging.get_logger(__name__)


class __magic_name__ ( _UpperCamelCase ):
    """Feature extractor for EnCodec-style audio models: validates sampling
    rate and channel count, then pads/truncates raw audio in chunk-aligned
    steps.

    NOTE(review): this block is broken by obfuscation: ``__init__`` repeats
    the parameter name ``_UpperCAmelCase`` (a SyntaxError); both properties
    below share the name ``__lowercase`` while ``__call__`` reads
    ``self.chunk_length`` and ``self.chunk_stride`` (presumably the original
    property names — confirm); and ``np.floataa`` is a mangled dtype name
    (presumably ``np.float32``). Locals bound to ``_a`` are later read under
    restored names (``raw_audio``, ``padded_inputs`` …), which would raise
    NameError. Confirm against the upstream feature-extraction file.
    """
    # model input names (originally ``model_input_names``)
    lowerCAmelCase : str = ['input_values', 'padding_mask']

    def __init__( self : Optional[int] ,_UpperCAmelCase : int = 1 ,_UpperCAmelCase : int = 24000 ,_UpperCAmelCase : float = 0.0 ,_UpperCAmelCase : float = None ,_UpperCAmelCase : float = None ,**_UpperCAmelCase : str ,):
        super().__init__(feature_size=_UpperCAmelCase ,sampling_rate=_UpperCAmelCase ,padding_value=_UpperCAmelCase ,**_UpperCAmelCase )
        _a : Optional[Any] = chunk_length_s
        _a : str = overlap

    # chunk length in samples, derived from chunk_length_s * sampling_rate
    @property
    def __lowercase ( self : Union[str, Any] ):
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate )

    # hop between chunks, derived from the overlap fraction
    @property
    def __lowercase ( self : Optional[int] ):
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1 ,int((1.0 - self.overlap) * self.chunk_length ) )

    def __call__( self : Tuple ,_UpperCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,_UpperCAmelCase : Optional[Union[bool, str, PaddingStrategy]] = None ,_UpperCAmelCase : Optional[bool] = False ,_UpperCAmelCase : Optional[int] = None ,_UpperCAmelCase : Optional[Union[str, TensorType]] = None ,_UpperCAmelCase : Optional[int] = None ,):
        """Validate, normalize and pad/truncate raw audio into model inputs."""
        # Reject audio sampled at a different rate than the model expects.
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
                    F""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"""
                    F""" {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )

        if padding and truncation:
            raise ValueError('Both padding and truncation were set. Make sure you only set one.' )
        elif padding is None:
            # by default let's pad the inputs
            _a : Union[str, Any] = True

        # Normalize input into a batch (list) of float arrays.
        _a : Optional[int] = bool(
            isinstance(_UpperCAmelCase ,(list, tuple) ) and (isinstance(raw_audio[0] ,(np.ndarray, tuple, list) )) )

        if is_batched:
            _a : Tuple = [np.asarray(_UpperCAmelCase ,dtype=np.floataa ).T for audio in raw_audio]
        elif not is_batched and not isinstance(_UpperCAmelCase ,np.ndarray ):
            _a : List[str] = np.asarray(_UpperCAmelCase ,dtype=np.floataa )
        elif isinstance(_UpperCAmelCase ,np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ):
            _a : Optional[int] = raw_audio.astype(np.floataa )

        # always return batch
        if not is_batched:
            _a : int = [np.asarray(_UpperCAmelCase ).T]

        # verify inputs are valid
        for idx, example in enumerate(_UpperCAmelCase ):
            if example.ndim > 2:
                raise ValueError(F"""Expected input shape (channels, length) but got shape {example.shape}""" )
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(F"""Expected mono audio but example has {example.shape[-1]} channels""" )
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(F"""Expected stereo audio but example has {example.shape[-1]} channels""" )

        _a : Tuple = None
        _a : str = BatchFeature({'input_values': raw_audio} )
        # Chunked models: align max_length to a whole number of chunk strides.
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                # truncate to the largest chunk-aligned length <= shortest example
                _a : Optional[Any] = min(array.shape[0] for array in raw_audio )
                _a : Optional[int] = int(np.floor(max_length / self.chunk_stride ) )
                _a : int = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                # pad to the smallest chunk-aligned length >= longest example
                _a : str = max(array.shape[0] for array in raw_audio )
                _a : Optional[int] = int(np.ceil(max_length / self.chunk_stride ) )
                _a : Optional[Any] = (nb_step - 1) * self.chunk_stride + self.chunk_length
                _a : str = 'max_length'
            else:
                _a : Dict = input_values

        # normal padding on batch
        if padded_inputs is None:
            _a : Tuple = self.pad(
                _UpperCAmelCase ,max_length=_UpperCAmelCase ,truncation=_UpperCAmelCase ,padding=_UpperCAmelCase ,return_attention_mask=_UpperCAmelCase ,)
            if padding:
                _a : Optional[Any] = padded_inputs.pop('attention_mask' )

        # Restore (length, channels) -> (channels, length) and add a channel
        # axis for mono audio.
        _a : Any = []
        for example in padded_inputs.pop('input_values' ):
            if self.feature_size == 1:
                _a : Optional[int] = example[..., None]
            input_values.append(example.T )

        _a : Optional[Any] = input_values

        if return_tensors is not None:
            _a : str = padded_inputs.convert_to_tensors(_UpperCAmelCase )

        return padded_inputs
| 89 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase = logging.get_logger(__name__)
def lowercase(A_):
    """Build an ``ASTConfig`` for the checkpoint named *A_*.

    Sets the patch strides for the "12-12"/"14-14"/"16-16" variants and the
    input length for the speech-commands variant, then downloads the matching
    id2label mapping from the Hub and attaches it to the config.

    Raises ValueError for unrecognized model names.
    """
    config = ASTConfig()

    if "10-10" in A_:
        pass
    elif "speech-commands" in A_:
        config.max_length = 128
    elif "12-12" in A_:
        # NOTE(review): attribute names assumed from ASTConfig — confirm.
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in A_:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in A_:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in A_:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    # JSON keys are strings; convert them back to integer class ids.
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}

    return config
def lowercase(A_) -> str:
    """Map one parameter name from the original AST checkpoint to the
    Hugging Face ``audio_spectrogram_transformer`` naming scheme.

    The mangled original assigned every ``name.replace(...)`` result to a
    throwaway local and returned the input unchanged; the replacements now
    feed back into ``name`` so they take effect.
    """
    name = A_
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    # "attn.proj" must be handled before the bare "attn" substring check
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name
def lowercase ( A_ , A_ )-> Any:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
a : str = orig_state_dict.pop(A_ )
if "qkv" in key:
a : int = key.split("." )
a : Optional[int] = int(key_split[3] )
a : int = config.hidden_size
if "weight" in key:
a : List[str] = val[:dim, :]
a : Any = val[dim : dim * 2, :]
a : int = val[-dim:, :]
else:
a : Optional[Any] = val[:dim]
a : Union[str, Any] = val[dim : dim * 2]
a : str = val[-dim:]
else:
a : str = val
return orig_state_dict
def lowercase(A_) -> None:
    """Drop the original classification-head tensors from the state dict.

    ``A_`` is the checkpoint state dict; it is modified in place.  The mangled
    original read an undefined ``state_dict`` and called ``pop(A_, A_)``
    instead of popping each ignore key with a ``None`` default.
    """
    state_dict = A_
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        # default of None so absent keys do not raise
        state_dict.pop(k, None)
@torch.no_grad()
def lowercase ( A_ , A_ , A_=False )-> Optional[int]:
    '''simple docstring'''
    # NOTE(review): mangled converter entry point.  The three parameters all
    # share the name ``A_`` (duplicate argument names are a SyntaxError) and
    # the helpers called below (get_audio_spectrogram_transformer_config,
    # remove_keys, convert_state_dict) plus many locals (model_name,
    # model_name_to_url, model, feature_extractor, ...) are not defined under
    # those names in this module.  Presumably the signature was
    # (model_name, pytorch_dump_folder_path, push_to_hub=False) — TODO confirm.
    # Intended flow: build config, download original weights, strip/rename
    # keys, load into ASTForAudioClassification, sanity-check logits, then
    # optionally save and push.
    a : Optional[int] = get_audio_spectrogram_transformer_config(A_ )
    # checkpoint name -> original Dropbox weights URL
    a : Dict = {
        "ast-finetuned-audioset-10-10-0.4593": (
            "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.450": (
            "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448": (
            "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448-v2": (
            "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
        ),
        "ast-finetuned-audioset-12-12-0.447": (
            "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
        ),
        "ast-finetuned-audioset-14-14-0.443": (
            "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
        ),
        "ast-finetuned-audioset-16-16-0.442": (
            "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
        ),
        "ast-finetuned-speech-commands-v2": (
            "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
        ),
    }
    # load original state_dict
    a : Any = model_name_to_url[model_name]
    a : List[Any] = torch.hub.load_state_dict_from_url(A_ , map_location="cpu" )
    # remove some keys
    remove_keys(A_ )
    # rename some keys
    a : Union[str, Any] = convert_state_dict(A_ , A_ )
    # load 🤗 model
    a : List[str] = ASTForAudioClassification(A_ )
    model.eval()
    model.load_state_dict(A_ )
    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    # feature-extractor normalization stats differ for the speech-commands models
    a : Tuple = -4.2_6_7_7_3_9_3 if "speech-commands" not in model_name else -6.8_4_5_9_7_8
    a : Union[str, Any] = 4.5_6_8_9_9_7_4 if "speech-commands" not in model_name else 5.5_6_5_4_5_2_6
    a : str = 1_024 if "speech-commands" not in model_name else 128
    a : List[Any] = ASTFeatureExtractor(mean=A_ , std=A_ , max_length=A_ )
    if "speech-commands" in model_name:
        a : List[str] = load_dataset("speech_commands" , "v0.02" , split="validation" )
        a : int = dataset[0]["audio"]["array"]
    else:
        a : Tuple = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" , )
        a , a : Tuple = torchaudio.load(A_ )
        a : Optional[Any] = waveform.squeeze().numpy()
    a : Union[str, Any] = feature_extractor(A_ , sampling_rate=16_000 , return_tensors="pt" )
    # forward pass
    a : Optional[Any] = model(**A_ )
    a : List[str] = outputs.logits
    # per-checkpoint expected logits slice used as a conversion sanity check
    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        a : Any = torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2] )
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        a : Optional[int] = torch.tensor([-1.1_9_8_6, -7.0_9_0_3, -8.2_7_1_8] )
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        a : List[str] = torch.tensor([-2.6_1_2_8, -8.0_0_8_0, -9.4_3_4_4] )
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        a : Tuple = torch.tensor([-1.5_0_8_0, -7.4_5_3_4, -8.8_9_1_7] )
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        a : int = torch.tensor([-0.5_0_5_0, -6.5_8_3_3, -8.0_8_4_3] )
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        a : Any = torch.tensor([-0.3_8_2_6, -7.0_3_3_6, -8.2_4_1_3] )
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        a : Dict = torch.tensor([-1.2_1_1_3, -6.9_1_0_1, -8.3_4_7_0] )
    elif model_name == "ast-finetuned-speech-commands-v2":
        a : Union[str, Any] = torch.tensor([6.1_5_8_9, -8.0_5_6_6, -8.7_9_8_4] )
    else:
        raise ValueError("Unknown model name" )
    if not torch.allclose(logits[0, :3] , A_ , atol=1e-4 ):
        raise ValueError("Logits don't match" )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        Path(A_ ).mkdir(exist_ok=A_ )
        print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(A_ )
        print(F'''Saving feature extractor to {pytorch_dump_folder_path}''' )
        feature_extractor.save_pretrained(A_ )
    if push_to_hub:
        print("Pushing model and feature extractor to the hub..." )
        model.push_to_hub(F'''MIT/{model_name}''' )
        feature_extractor.push_to_hub(F'''MIT/{model_name}''' )
if __name__ == "__main__":
    # NOTE(review): mangled script epilogue — the parser is assigned to
    # ``__lowercase`` but used as ``parser``, ``args`` is never bound, and
    # ``convert_audio_spectrogram_transformer_checkpoint`` is not defined under
    # that name in this module (the converter above is named ``lowercase``).
    __lowercase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default="""ast-finetuned-audioset-10-10-0.4593""",
        type=str,
        help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    parser.add_argument(
        """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
    )

    __lowercase = parser.parse_args()
    convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 40 | 0 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
__UpperCAmelCase =logging.get_logger(__name__)
def __lowerCAmelCase(input_image, output_size, keep_aspect_ratio, multiple) -> Tuple[int, int]:
    """Compute the DPT-style resize target ``(new_height, new_width)``.

    ``output_size`` may be an int (square target) or an ``(h, w)`` pair; when
    ``keep_aspect_ratio`` is true, the axis scale closer to 1 is applied to
    both axes.  Both results are snapped to a multiple of ``multiple``.

    Fixes vs. the mangled original: both the outer function and the nested
    helper declared duplicate parameter names (a SyntaxError), and the
    ``isinstance`` check compared ``output_size`` against itself instead of
    ``int``.
    """

    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        # round to the nearest multiple, then repair violations of the bounds
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    # get_image_size presumably returns (height, width) — TODO confirm
    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
class a__ ( UpperCAmelCase__ ):
    """DPT-style image processor: resize / rescale / normalize a batch of
    images and post-process segmentation logits.

    NOTE(review): mangled beyond safe rewrite — every method declares all its
    parameters as ``a`` (duplicate argument names are a SyntaxError) and the
    ``__lowerCamelCase`` locals are read back under other names
    (``size``, ``do_resize``, ``images``, ...).  Comments below flag the
    apparent intent; confirm against the upstream DPT image processor.
    """

    lowerCamelCase : Union[str, Any] =["pixel_values"]

    def __init__( self : List[Any] , a : bool = True , a : Dict[str, int] = None , a : PILImageResampling = PILImageResampling.BILINEAR , a : bool = False , a : int = 1 , a : bool = True , a : Union[int, float] = 1 / 2_55 , a : bool = True , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , **a : Optional[Any] , ):
        """Store the default preprocessing configuration (resize target,
        rescale factor, normalization statistics)."""
        super().__init__(**a )
        __lowerCamelCase = size if size is not None else {'''height''': 3_84, '''width''': 3_84}
        __lowerCamelCase = get_size_dict(a )
        __lowerCamelCase = do_resize
        __lowerCamelCase = size
        __lowerCamelCase = keep_aspect_ratio
        __lowerCamelCase = ensure_multiple_of
        __lowerCamelCase = resample
        __lowerCamelCase = do_rescale
        __lowerCamelCase = rescale_factor
        __lowerCamelCase = do_normalize
        __lowerCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        __lowerCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def SCREAMING_SNAKE_CASE__ ( self : List[Any] , a : np.ndarray , a : Dict[str, int] , a : bool = False , a : int = 1 , a : PILImageResampling = PILImageResampling.BICUBIC , a : Optional[Union[str, ChannelDimension]] = None , **a : List[Any] , ):
        """Resize one image to the (possibly aspect-preserving, multiple-snapped)
        target computed by the module-level helper."""
        __lowerCamelCase = get_size_dict(a )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
        __lowerCamelCase = get_resize_output_image_size(
            a , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=a , multiple=a , )
        return resize(a , size=a , resample=a , data_format=a , **a )

    def SCREAMING_SNAKE_CASE__ ( self : int , a : np.ndarray , a : Union[int, float] , a : Optional[Union[str, ChannelDimension]] = None , **a : Union[str, Any] , ):
        """Rescale pixel values by a scalar factor (e.g. 1/255)."""
        return rescale(a , scale=a , data_format=a , **a )

    def SCREAMING_SNAKE_CASE__ ( self : Dict , a : np.ndarray , a : Union[float, List[float]] , a : Union[float, List[float]] , a : Optional[Union[str, ChannelDimension]] = None , **a : Optional[int] , ):
        """Normalize an image with per-channel mean and std."""
        return normalize(a , mean=a , std=a , data_format=a , **a )

    def SCREAMING_SNAKE_CASE__ ( self : int , a : ImageInput , a : bool = None , a : int = None , a : bool = None , a : int = None , a : PILImageResampling = None , a : bool = None , a : float = None , a : bool = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[str, TensorType]] = None , a : ChannelDimension = ChannelDimension.FIRST , **a : Optional[Any] , ):
        """Run the full pipeline (resize -> rescale -> normalize) over a batch
        and return a ``BatchFeature`` with ``pixel_values``."""
        __lowerCamelCase = do_resize if do_resize is not None else self.do_resize
        __lowerCamelCase = size if size is not None else self.size
        __lowerCamelCase = get_size_dict(a )
        __lowerCamelCase = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        __lowerCamelCase = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        __lowerCamelCase = resample if resample is not None else self.resample
        __lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale
        __lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
        __lowerCamelCase = do_normalize if do_normalize is not None else self.do_normalize
        __lowerCamelCase = image_mean if image_mean is not None else self.image_mean
        __lowerCamelCase = image_std if image_std is not None else self.image_std
        __lowerCamelCase = make_list_of_images(a )
        if not valid_images(a ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        # NOTE(review): `A and B or C` parses as `(A and B) or C` — presumably
        # `do_resize and (size is None or resample is None)` was intended.
        if do_resize and size is None or resample is None:
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # All transformations expect numpy arrays.
        __lowerCamelCase = [to_numpy_array(a ) for image in images]
        if do_resize:
            __lowerCamelCase = [self.resize(image=a , size=a , resample=a ) for image in images]
        if do_rescale:
            __lowerCamelCase = [self.rescale(image=a , scale=a ) for image in images]
        if do_normalize:
            __lowerCamelCase = [self.normalize(image=a , mean=a , std=a ) for image in images]
        # NOTE(review): `(a, a)` passes the same value twice — presumably
        # `(image, data_format)` was intended.
        __lowerCamelCase = [to_channel_dimension_format(a , a ) for image in images]
        __lowerCamelCase = {'''pixel_values''': images}
        return BatchFeature(data=a , tensor_type=a )

    def SCREAMING_SNAKE_CASE__ ( self : int , a : str , a : List[Tuple] = None ):
        """Turn model output logits into per-image semantic segmentation maps,
        optionally bilinearly resized to the given target sizes."""
        __lowerCamelCase = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(a ) != len(a ):
                raise ValueError(
                    '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
            if is_torch_tensor(a ):
                __lowerCamelCase = target_sizes.numpy()
            __lowerCamelCase = []
            for idx in range(len(a ) ):
                __lowerCamelCase = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=a )
                __lowerCamelCase = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(a )
        else:
            __lowerCamelCase = logits.argmax(dim=1 )
            __lowerCamelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
| 350 | '''simple docstring'''
def __lowerCAmelCase(principal, rate_per_annum, years_to_repay) -> float:
    """Return the equated monthly installment (EMI) for a loan.

    principal        -- amount borrowed, must be > 0
    rate_per_annum   -- yearly interest rate (e.g. 0.12 for 12%), must be >= 0
    years_to_repay   -- repayment period, must be a positive integer

    Raises ``Exception`` on invalid input (kept from the original; callers
    catching ``Exception`` keep working).  The mangled original declared all
    three parameters under the same name (a SyntaxError) and compared
    ``isinstance(x, x)`` instead of ``isinstance(years_to_repay, int)``.
    """
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
| 237 | 0 |
'''simple docstring'''
import random
from .binary_exp_mod import bin_exp_mod
def __lowerCAmelCase(n, prec=1_000) -> bool:
    """Probabilistic (Miller–Rabin) primality test.

    n    -- integer to test
    prec -- number of random witness rounds; error probability <= 4**-prec

    Fixes vs. the mangled original: the two parameters shared one name (a
    SyntaxError), ``d /= 2`` produced a float exponent (now ``//=``), the
    squaring step discarded its result instead of updating ``b``, and the
    project-local ``bin_exp_mod`` is replaced by the builtin three-argument
    ``pow`` (modular exponentiation), making the test self-contained.
    """
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # n is odd: write n - 1 = d * 2**exp with d odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1

    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = pow(a, d, n)
        if b != 1:
            # square up to exp times looking for b == n - 1 (strong witness)
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False  # a is a witness: n is definitely composite
        count += 1
    return True
if __name__ == "__main__":
    # Read an inclusive upper bound from stdin and print every prime up to it.
    # The mangled original assigned the bound to ``__UpperCAmelCase`` but read
    # ``n``, and called an undefined ``is_prime_big``; the primality test in
    # this module is bound to ``__lowerCAmelCase``.
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if __lowerCAmelCase(i)))
| 67 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class A__(unittest.TestCase ):
    """Fast CPU tests for ``StableDiffusionLDMaDPipeline`` built from tiny dummy
    components.

    NOTE(review): mangled — all helper/test methods share the name
    ``UpperCamelCase__`` (later defs shadow earlier ones inside the class),
    several methods reference ``_lowercase`` without declaring it, and tuple
    assignments like ``a_ , a_ : Tuple = output.rgb, output.depth`` bind both
    values to ``a_`` while later lines read ``rgb``/``depth``.  Treat as
    reference-only until the names are restored.
    """
    _A : List[str] = StableDiffusionLDMaDPipeline
    _A : int = TEXT_TO_IMAGE_PARAMS
    _A : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
    _A : str = TEXT_TO_IMAGE_IMAGE_PARAMS

    # builds the tiny unet/scheduler/vae/text-encoder component dict
    def UpperCamelCase__ ( self ) -> Union[str, Any]:
        torch.manual_seed(0 )
        a_ : Tuple = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
        a_ : List[Any] = DDIMScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=_lowercase , set_alpha_to_one=_lowercase , )
        torch.manual_seed(0 )
        a_ : List[str] = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
        torch.manual_seed(0 )
        a_ : Dict = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        a_ : Tuple = CLIPTextModel(_lowercase )
        a_ : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        a_ : Any = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components

    # builds deterministic pipeline kwargs for a given device/seed
    def UpperCamelCase__ ( self , _lowercase , _lowercase=0 ) -> Any:
        if str(_lowercase ).startswith("""mps""" ):
            a_ : Optional[Any] = torch.manual_seed(_lowercase )
        else:
            a_ : Optional[Any] = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
        a_ : Optional[Any] = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
        }
        return inputs

    # smoke test: rgb/depth shapes and expected slices on CPU
    def UpperCamelCase__ ( self ) -> List[Any]:
        a_ : str = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        a_ : Any = self.get_dummy_components()
        a_ : List[str] = StableDiffusionLDMaDPipeline(**_lowercase )
        a_ : Union[str, Any] = ldmad_pipe.to(_lowercase )
        ldmad_pipe.set_progress_bar_config(disable=_lowercase )
        a_ : int = self.get_dummy_inputs(_lowercase )
        a_ : List[Any] = ldmad_pipe(**_lowercase )
        a_ , a_ : Tuple = output.rgb, output.depth
        a_ : Union[str, Any] = rgb[0, -3:, -3:, -1]
        a_ : Any = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        a_ : Optional[Any] = np.array(
            [0.3_7_3_3_8_1_7_6, 0.7_0_2_4_7, 0.7_4_2_0_3_1_9_3, 0.5_1_6_4_3_6_0_4, 0.5_8_2_5_6_7_9_3, 0.6_0_9_3_2_1_3_6, 0.4_1_8_1_0_9_5, 0.4_8_3_5_5_8_7_7, 0.4_6_5_3_5_2_6_2] )
        a_ : int = np.array([1_0_3.4_6_7_2_7, 8_5.8_1_2_0_0_4, 8_7.8_4_9_2_3_6] )
        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1e-2

    # prompt vs. precomputed prompt_embeds must produce identical outputs
    def UpperCamelCase__ ( self ) -> Optional[Any]:
        a_ : Tuple = self.get_dummy_components()
        a_ : Optional[int] = StableDiffusionLDMaDPipeline(**_lowercase )
        a_ : Optional[Any] = ldmad_pipe.to(_lowercase )
        ldmad_pipe.set_progress_bar_config(disable=_lowercase )
        a_ : Dict = self.get_dummy_inputs(_lowercase )
        a_ : List[str] = 3 * [inputs["""prompt"""]]
        # forward
        a_ : Optional[int] = ldmad_pipe(**_lowercase )
        a_ , a_ : Any = output.rgb, output.depth
        a_ : Union[str, Any] = rgb_slice_a[0, -3:, -3:, -1]
        a_ : Union[str, Any] = depth_slice_a[0, -3:, -1]
        a_ : Dict = self.get_dummy_inputs(_lowercase )
        a_ : List[str] = 3 * [inputs.pop("""prompt""" )]
        a_ : List[Any] = ldmad_pipe.tokenizer(
            _lowercase , padding="""max_length""" , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=_lowercase , return_tensors="""pt""" , )
        a_ : int = text_inputs["""input_ids"""].to(_lowercase )
        a_ : Any = ldmad_pipe.text_encoder(_lowercase )[0]
        a_ : Dict = prompt_embeds
        # forward
        a_ : int = ldmad_pipe(**_lowercase )
        a_ , a_ : Optional[int] = output.rgb, output.depth
        a_ : List[str] = rgb_slice_a[0, -3:, -3:, -1]
        a_ : Tuple = depth_slice_a[0, -3:, -1]
        assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1e-4
        assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1e-4

    # negative-prompt path with a PNDM scheduler
    def UpperCamelCase__ ( self ) -> Dict:
        a_ : int = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        a_ : Dict = self.get_dummy_components()
        a_ : Any = PNDMScheduler(skip_prk_steps=_lowercase )
        a_ : List[str] = StableDiffusionLDMaDPipeline(**_lowercase )
        a_ : str = ldmad_pipe.to(_lowercase )
        ldmad_pipe.set_progress_bar_config(disable=_lowercase )
        a_ : List[Any] = self.get_dummy_inputs(_lowercase )
        a_ : int = """french fries"""
        a_ : Any = ldmad_pipe(**_lowercase , negative_prompt=_lowercase )
        a_ , a_ : Optional[Any] = output.rgb, output.depth
        a_ : Tuple = rgb[0, -3:, -3:, -1]
        a_ : Union[str, Any] = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        a_ : Optional[int] = np.array(
            [0.3_7_0_4_4, 0.7_1_8_1_1_5_0_3, 0.7_2_2_3_2_5_1, 0.4_8_6_0_3_6_7_5, 0.5_6_3_8_3_9_1, 0.6_3_6_4_9_4_8, 0.4_2_8_3_3_7_0_4, 0.4_9_0_1_3_1_5, 0.4_7_9_2_6_2_1_7] )
        a_ : Union[str, Any] = np.array([1_0_7.8_4_7_3_8, 8_4.6_2_8_0_2, 8_9.9_6_2_1_3_5] )
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1e-2
@slow
@require_torch_gpu
class A__(unittest.TestCase ):
    """Slow GPU integration tests for the pretrained ``Intel/ldm3d`` pipeline.

    NOTE(review): mangled — methods share the name ``UpperCamelCase__``,
    ``get_inputs`` declares four duplicate ``_lowercase`` parameters (a
    SyntaxError), ``torch.floataa`` is not a real attribute (presumably a
    mangled float dtype), and locals are read under names never assigned.
    """

    # presumably tearDown: free GPU memory after each test
    def UpperCamelCase__ ( self ) -> str:
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    # builds fixed latents + pipeline kwargs for a given device/dtype/seed
    def UpperCamelCase__ ( self , _lowercase , _lowercase="cpu" , _lowercase=torch.floataa , _lowercase=0 ) -> List[str]:
        a_ : Union[str, Any] = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
        a_ : Dict = np.random.RandomState(_lowercase ).standard_normal((1, 4, 64, 64) )
        a_ : Tuple = torch.from_numpy(_lowercase ).to(device=_lowercase , dtype=_lowercase )
        a_ : Any = {
            """prompt""": """a photograph of an astronaut riding a horse""",
            """latents""": latents,
            """generator""": generator,
            """num_inference_steps""": 3,
            """guidance_scale""": 7.5,
            """output_type""": """numpy""",
        }
        return inputs

    # checks rgb/depth shapes and expected slices of the pretrained pipeline
    def UpperCamelCase__ ( self ) -> Any:
        a_ : Tuple = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d""" )
        a_ : str = ldmad_pipe.to(_lowercase )
        ldmad_pipe.set_progress_bar_config(disable=_lowercase )
        a_ : Dict = self.get_inputs(_lowercase )
        a_ : Optional[Any] = ldmad_pipe(**_lowercase )
        a_ , a_ : int = output.rgb, output.depth
        a_ : str = rgb[0, -3:, -3:, -1].flatten()
        a_ : Tuple = rgb[0, -3:, -1].flatten()
        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)
        a_ : Optional[int] = np.array(
            [0.5_3_8_0_5_4_6_5, 0.5_6_7_0_7_3_0_5, 0.5_4_8_6_5_1_5, 0.5_7_0_1_2_2_3_6, 0.5_8_1_4_5_1_1, 0.5_6_2_5_3_4_8_7, 0.5_4_8_4_3_0_1_4, 0.5_5_0_9_2_2_6_3, 0.6_4_5_9_7_0_6] )
        a_ : Optional[int] = np.array(
            [0.9_2_6_3_7_8_1, 0.6_6_7_8_6_7_2, 0.5_4_8_6_5_1_5, 0.9_2_2_0_2_1_4_5, 0.6_7_8_3_1_1_3_5, 0.5_6_2_5_3_4_8_7, 0.9_2_4_1_6_9_4, 0.7_5_5_1_4_7_8, 0.6_4_5_9_7_0_6] )
        assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth ).max() < 3e-3
@nightly
@require_torch_gpu
class A__(unittest.TestCase ):
    """Nightly GPU statistics tests for ``Intel/ldm3d`` and ``Intel/ldm3d-4c``.

    NOTE(review): mangled in the same ways as the classes above — shared
    method name ``UpperCamelCase__``, duplicate ``_lowercase`` parameters
    (SyntaxError), ``torch.floataa``, and locals read under unassigned names.
    """

    # presumably tearDown: free GPU memory after each test
    def UpperCamelCase__ ( self ) -> Optional[int]:
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    # builds fixed latents + pipeline kwargs (50 inference steps)
    def UpperCamelCase__ ( self , _lowercase , _lowercase="cpu" , _lowercase=torch.floataa , _lowercase=0 ) -> str:
        a_ : List[Any] = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
        a_ : Tuple = np.random.RandomState(_lowercase ).standard_normal((1, 4, 64, 64) )
        a_ : Any = torch.from_numpy(_lowercase ).to(device=_lowercase , dtype=_lowercase )
        a_ : Dict = {
            """prompt""": """a photograph of an astronaut riding a horse""",
            """latents""": latents,
            """generator""": generator,
            """num_inference_steps""": 50,
            """guidance_scale""": 7.5,
            """output_type""": """numpy""",
        }
        return inputs

    # checks mean/std statistics of the ldm3d outputs
    def UpperCamelCase__ ( self ) -> Optional[Any]:
        a_ : Tuple = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d""" ).to(_lowercase )
        ldmad_pipe.set_progress_bar_config(disable=_lowercase )
        a_ : List[str] = self.get_inputs(_lowercase )
        a_ : Union[str, Any] = ldmad_pipe(**_lowercase )
        a_ , a_ : str = output.rgb, output.depth
        a_ : List[str] = 0.4_9_5_5_8_6
        a_ : int = 0.3_3_7_9_5_5_1_5
        a_ : int = 1_1_2.4_8_5_1_8
        a_ : Optional[int] = 9_8.4_8_9_7_4_6
        assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
        assert np.abs(expected_depth_std - depth.std() ) < 1e-3

    # checks statistics and the 4-channel depth shape of ldm3d-4c
    def UpperCamelCase__ ( self ) -> Optional[int]:
        a_ : Optional[Any] = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d-4c""" ).to(_lowercase )
        ldmad_pipe.set_progress_bar_config(disable=_lowercase )
        a_ : List[str] = self.get_inputs(_lowercase )
        a_ : List[Any] = ldmad_pipe(**_lowercase )
        a_ , a_ : List[Any] = output.rgb, output.depth
        a_ : int = 0.4_1_9_4_1_2_7
        a_ : List[str] = 0.3_5_3_7_5_5_8_6
        a_ : Optional[int] = 0.5_6_3_8_5_0_2
        a_ : str = 0.3_4_6_8_6_1_0_3
        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)
        assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
        assert np.abs(expected_depth_std - depth.std() ) < 1e-3
| 248 | 0 |
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
# Stable Diffusion inference on CPU, optimized with Intel Extension for
# PyTorch (ipex): channels-last memory format + bf16 graph optimization.
# The mangled original assigned every value to ``__UpperCamelCase`` and then
# read ``parser``/``pipe``/``args``/... (all undefined) and referenced the
# nonexistent ``torch.bfloataa``; names and the dtype are restored below.
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    # sample_input lets ipex trace the unet; fall back if tracing fails
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
| 370 |
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def _a(default=None, metadata=None):
    """Dataclass field helper: wrap *default* in a ``default_factory`` (so a
    mutable default is legal in a dataclass) and attach optional *metadata*.

    The mangled original declared both parameters under one name (a
    SyntaxError) and referenced an unbound ``default``.

    NOTE(review): the factory returns the very same *default* object for every
    instance (no copy) — this matches the upstream ``list_field`` helper.
    """
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class __magic_name__ :
    # NOTE(review): mangled plot-arguments dataclass — every field is named
    # ``A`` (so only the last annotation survives as a dataclass field), the
    # default sentinel ``__lowerCAmelCase`` is undefined, and ``list_field``
    # is not defined under that name in this module (the helper above is
    # ``_a``).  Upstream field names appear to be: csv_file, plot_along_batch,
    # is_time, no_log_scale, is_train, figure_png_file, short_model_names —
    # TODO confirm against the transformers benchmark plotting script.
    A: str = field(
        metadata={"help": "The csv file to plot."} , )
    A: bool = field(
        default=__lowerCAmelCase , metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."} , )
    A: bool = field(
        default=__lowerCAmelCase , metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."} , )
    A: bool = field(
        default=__lowerCAmelCase , metadata={"help": "Disable logarithmic scale when plotting"} , )
    A: bool = field(
        default=__lowerCAmelCase , metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        } , )
    A: Optional[str] = field(
        default=__lowerCAmelCase , metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."} , )
    A: Optional[List[str]] = list_field(
        default=__lowerCAmelCase , metadata={"help": "List of model names that are used instead of the ones in the csv file."})
def _a(SCREAMING_SNAKE_CASE: Any):
    """Tell whether the given value can be converted to ``int``.

    Returns False only when ``int(...)`` raises ``ValueError``; other
    exceptions (e.g. ``TypeError``) propagate to the caller.
    """
    try:
        int(SCREAMING_SNAKE_CASE)
    except ValueError:
        return False
    return True
def _a(SCREAMING_SNAKE_CASE: Union[str, Any]):
    """Tell whether the given value can be converted to ``float``.

    Returns False only when ``float(...)`` raises ``ValueError``; other
    exceptions (e.g. ``TypeError``) propagate to the caller.
    """
    try:
        float(SCREAMING_SNAKE_CASE)
    except ValueError:
        return False
    return True
class __magic_name__ :
    """Plot benchmark results (time or memory vs. batch size / sequence length) from a csv file.

    NOTE(review): the obfuscated local bindings in this class are broken --
    ``__init__`` assigns everything to ``UpperCamelCase__`` but then reads
    ``self.args``, ``reader``, ``model_name``, and ``plot`` reads
    ``x_axis_array`` / ``inner_loop_array`` / ``results`` / ``ax`` that are
    never bound under those names.  The comments document the intended flow;
    the bindings must be restored before this class can run.
    """

    def __init__( self : Any , lowerCamelCase__ : Dict ) -> Dict:
        '''Read ``args.csv_file`` into a per-model dict of batch sizes, sequence
        lengths and a ``(bsz, seq_len) -> value`` result map.'''
        UpperCamelCase__ : int = args
        UpperCamelCase__ : Any = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
        with open(self.args.csv_file , newline='''''' ) as csv_file:
            UpperCamelCase__ : Union[str, Any] = csv.DictReader(lowerCamelCase__ )
            for row in reader:
                UpperCamelCase__ : Union[str, Any] = row['''model''']
                self.result_dict[model_name]["bsz"].append(int(row['''batch_size'''] ) )
                self.result_dict[model_name]["seq_len"].append(int(row['''sequence_length'''] ) )
                # Prefer int parsing; fall back to float; silently skip rows
                # whose result is neither (e.g. empty / "N/A").
                if can_convert_to_int(row['''result'''] ):
                    # value is not None
                    UpperCamelCase__ : Any = int(row['''result'''] )
                elif can_convert_to_float(row['''result'''] ):
                    # value is not None
                    UpperCamelCase__ : Any = float(row['''result'''] )

    def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]:
        '''Draw one scatter+line series per model (per inner-loop value) and
        either save the figure to ``args.figure_png_file`` or show it.'''
        UpperCamelCase__ , UpperCamelCase__ : str = plt.subplots()
        UpperCamelCase__ : Dict = '''Time usage''' if self.args.is_time else '''Memory usage'''
        UpperCamelCase__ : int = title_str + ''' for training''' if self.args.is_train else title_str + ''' for inference'''
        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale('''log''' )
            ax.set_yscale('''log''' )
            for axis in [ax.xaxis, ax.yaxis]:
                axis.set_major_formatter(ScalarFormatter() )
        for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
            UpperCamelCase__ : Tuple = sorted(set(self.result_dict[model_name]['''bsz'''] ) )
            UpperCamelCase__ : Tuple = sorted(set(self.result_dict[model_name]['''seq_len'''] ) )
            UpperCamelCase__ : Dict = self.result_dict[model_name]['''result''']
            # x axis is batch size when plotting along batch, else sequence length.
            ((UpperCamelCase__) , (UpperCamelCase__)) : int = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )
            UpperCamelCase__ : Optional[int] = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )
            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    UpperCamelCase__ : Optional[Any] = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=lowerCamelCase__ , )
                else:
                    UpperCamelCase__ : Tuple = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , )
                ((UpperCamelCase__) , (UpperCamelCase__)) : str = (
                    ('''batch_size''', '''len''') if self.args.plot_along_batch else ('''in #tokens''', '''bsz''')
                )
                # Truncate the x axis to the number of measured points.
                UpperCamelCase__ : Optional[Any] = np.asarray(lowerCamelCase__ , lowerCamelCase__ )[: len(lowerCamelCase__ )]
                plt.scatter(
                    lowerCamelCase__ , lowerCamelCase__ , label=F"{label_model_name} - {inner_loop_label}: {inner_loop_value}" )
                plt.plot(lowerCamelCase__ , lowerCamelCase__ , '''--''' )
            title_str += F" {label_model_name} vs."
        UpperCamelCase__ : Optional[Any] = title_str[:-4]
        UpperCamelCase__ : List[Any] = '''Time in s''' if self.args.is_time else '''Memory in MB'''
        # plot
        plt.title(lowerCamelCase__ )
        plt.xlabel(lowerCamelCase__ )
        plt.ylabel(lowerCamelCase__ )
        plt.legend()
        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file )
        else:
            plt.show()
def _a ( ):
    """Entry point: parse the plotting arguments from the CLI and render the plot.

    NOTE(review): ``SCREAMING_SNAKE_CASE`` is expected to be the plot-arguments
    dataclass defined earlier in this file -- the obfuscated name does not
    resolve here; confirm the intended target.
    """
    parser = HfArgumentParser(SCREAMING_SNAKE_CASE )
    # parse_args_into_dataclasses() returns a tuple of dataclass instances; only
    # one dataclass was registered, so take the first element.
    plot_args = parser.parse_args_into_dataclasses()[0]
    # Pass the parsed *instance* (the previous code passed the dataclass type).
    plot = Plot(args=plot_args )
    plot.plot()


if __name__ == "__main__":
    # Call the local entry point (the previous ``main()`` name is not defined).
    _a()
| 51 | 0 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def _a ( ) -> Any:
    """Build the CLI for the TPU multi-process launcher and parse ``sys.argv``.

    Returns:
        argparse.Namespace with ``num_cores``, ``training_script`` and
        ``training_script_args``.
    """
    parser = ArgumentParser(
        description=(
            '''PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'''
        ) )
    # Optional arguments for the launch helper
    parser.add_argument('''--num_cores''' , type=int , default=1 , help='''Number of TPU cores to use (1 or 8).''' )
    # positional
    parser.add_argument(
        '''training_script''' , type=str , help=(
            '''The full path to the single TPU training '''
            '''program/script to be launched in parallel, '''
            '''followed by all the arguments for the '''
            '''training script'''
        ) , )
    # rest from the training program: capture everything after the script path.
    parser.add_argument('''training_script_args''' , nargs=REMAINDER )
    return parser.parse_args()
def _a ( ) -> Tuple:
    """Import the user's training script as a module and spawn ``_mp_fn`` on each TPU core.

    NOTE(review): ``parse_args()`` is not defined under that name in this file
    (the parser builder above was renamed during obfuscation) -- confirm the
    intended target before running.
    """
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv so the training script sees its own arguments plus the
    # core count (the previous code built this list and then discarded it).
    sys.argv = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )


if __name__ == "__main__":
    # Call the local entry point (the previous ``main()`` name is not defined).
    _a()
| 0 |
'''simple docstring'''
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
# Surface INFO-level progress logs from transformers during the conversion run.
logging.set_verbosity_info()
def __lowerCamelCase ( params : dict, i : int, prefix : str ) -> Dict:
    """Return the relative-position bias embedding of layer ``i``.

    ``params`` is the flattened T5X parameter dict; ``prefix`` is ``"encoder"``
    or ``"decoder"``.  (The previous signature declared the same name for all
    three parameters -- a SyntaxError -- while the body read these names.)
    """
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def __lowerCamelCase ( params : dict, i : int, prefix : str, layer_name : str = "attention" ) -> str:
    """Return the (k, o, q, v) attention kernels of layer ``i``, reshaped to 2-D.

    Each T5X kernel is stored as (in, layer, head, head_dim); the layer axis is
    indexed out and the remaining head axes folded together.  (The previous
    signature used one duplicated parameter name -- a SyntaxError -- while the
    body read these names, and the results were bound to throwaway names.)
    """
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :] )
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2] )
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :] )
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2] )
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :] )
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2] )
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :] )
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2] )
    return k, o, q, v
def __lowerCamelCase ( params : dict, i : int, prefix : str, split_mlp_wi : bool = False ) -> Any:
    """Return the (wi, wo) MLP kernels of layer ``i``.

    When ``split_mlp_wi`` is True (v1.1 gated-GeLU checkpoints) ``wi`` is the
    pair ``(wi_0, wi_1)``; otherwise it is a single kernel.  (The previous
    signature duplicated one parameter name -- a SyntaxError -- while the body
    read these names.)
    """
    if split_mlp_wi:
        wi_a = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_b = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_a, wi_b)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo
def __lowerCamelCase ( params : dict, i : int, prefix : str, layer_name : str ) -> List[Any]:
    """Return the layer-norm scale vector of ``layer_name`` in block ``i``.

    (The previous signature duplicated one parameter name -- a SyntaxError --
    while the body read these names.)
    """
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def __lowerCamelCase ( __snake_case : dict, *, __snake_case : int, __snake_case : bool, __snake_case : bool = False ) -> Union[str, Any]:
    """Convert a flattened T5X parameter tree into a PyTorch-layout parameter dict.

    NOTE(review): the obfuscation collapsed all four parameters into
    ``__snake_case`` (a duplicate-argument SyntaxError) and redirected every
    state-dict assignment into the throwaway name ``A__`` -- the converted
    tensors (``layer_norm``, ``k``/``o``/``q``/``v``, ``wi``/``wo``) are read
    below but never bound, and the original ``new[...]`` target keys are lost.
    The comments below document the intended mapping only.
    """
    A__ : Optional[int] =traverse_util.flatten_dict(variables["""target"""] )
    A__ : int ={"""/""".join(__snake_case ): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    A__ : List[Any] ="""encoder/encoder/mlp/wi_0/kernel""" in old
    print("""Split MLP:""", __snake_case )
    A__ : Optional[int] =collections.OrderedDict()
    # Shared embeddings.
    A__ : List[Any] =old["""token_embedder/embedding"""]
    # Encoder.
    for i in range(__snake_case ):
        # Block i, layer 0 (Self Attention).
        A__ : Optional[Any] =tax_layer_norm_lookup(__snake_case, __snake_case, """encoder""", """pre_attention_layer_norm""" )
        A__ , A__ , A__ , A__ : Optional[int] =tax_attention_lookup(__snake_case, __snake_case, """encoder""", """attention""" )
        # T5X stores kernels transposed relative to torch.nn.Linear, hence .T below.
        A__ : List[str] =layer_norm
        A__ : Dict =k.T
        A__ : Optional[int] =o.T
        A__ : str =q.T
        A__ : Any =v.T
        # Block i, layer 1 (MLP).
        A__ : List[Any] =tax_layer_norm_lookup(__snake_case, __snake_case, """encoder""", """pre_mlp_layer_norm""" )
        A__ , A__ : int =tax_mlp_lookup(__snake_case, __snake_case, """encoder""", __snake_case )
        A__ : Optional[int] =layer_norm
        if split_mlp_wi:
            A__ : List[str] =wi[0].T
            A__ : List[str] =wi[1].T
        else:
            A__ : Optional[int] =wi.T
        A__ : Optional[Any] =wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            A__ : int =tax_relpos_bias_lookup(
                __snake_case, __snake_case, """encoder""" ).T
    A__ : Optional[int] =old["""encoder/encoder_norm/scale"""]
    if not scalable_attention:
        # Non-scalable models share a single relative-position bias per stack.
        A__ : List[Any] =tax_relpos_bias_lookup(
            __snake_case, 0, """encoder""" ).T
        A__ : Tuple =tax_relpos_bias_lookup(
            __snake_case, 0, """decoder""" ).T
    if not is_encoder_only:
        # Decoder.
        for i in range(__snake_case ):
            # Block i, layer 0 (Self Attention).
            A__ : List[str] =tax_layer_norm_lookup(__snake_case, __snake_case, """decoder""", """pre_self_attention_layer_norm""" )
            A__ , A__ , A__ , A__ : List[str] =tax_attention_lookup(__snake_case, __snake_case, """decoder""", """self_attention""" )
            A__ : str =layer_norm
            A__ : List[str] =k.T
            A__ : int =o.T
            A__ : Tuple =q.T
            A__ : Optional[Any] =v.T
            # Block i, layer 1 (Cross Attention).
            A__ : int =tax_layer_norm_lookup(__snake_case, __snake_case, """decoder""", """pre_cross_attention_layer_norm""" )
            A__ , A__ , A__ , A__ : Optional[Any] =tax_attention_lookup(__snake_case, __snake_case, """decoder""", """encoder_decoder_attention""" )
            A__ : str =layer_norm
            A__ : Union[str, Any] =k.T
            A__ : str =o.T
            A__ : Any =q.T
            A__ : str =v.T
            # Block i, layer 2 (MLP).
            A__ : str =tax_layer_norm_lookup(__snake_case, __snake_case, """decoder""", """pre_mlp_layer_norm""" )
            A__ , A__ : Optional[int] =tax_mlp_lookup(__snake_case, __snake_case, """decoder""", __snake_case )
            A__ : Dict =layer_norm
            if split_mlp_wi:
                A__ : List[Any] =wi[0].T
                A__ : Union[str, Any] =wi[1].T
            else:
                A__ : Optional[int] =wi.T
            A__ : str =wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                A__ : str =tax_relpos_bias_lookup(__snake_case, __snake_case, """decoder""" ).T
        A__ : str =old["""decoder/decoder_norm/scale"""]
    # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
    if "decoder/logits_dense/kernel" in old:
        A__ : Tuple =old["""decoder/logits_dense/kernel"""].T
    return new
def __lowerCamelCase ( converted_params : Dict, is_encoder_only : bool ) -> Optional[Any]:
    """Turn converted numpy parameters into a torch state dict, filling tied weights.

    The embedding tables and (for v1.0 checkpoints) the LM head are tied to the
    shared embedding when absent.  (The previous signature duplicated one
    parameter name -- a SyntaxError -- and the tied-weight assignments were
    redirected into throwaway names.)
    """
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["""shared.weight"""]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["""shared.weight"""]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("""Using shared word embeddings as lm_head.""" )
            state_dict["lm_head.weight"] = state_dict["""shared.weight"""]
    return state_dict
def __lowerCamelCase ( model, config, tax_checkpoint_path : str, is_encoder_only : bool, scalable_attention : bool ) -> Optional[int]:
    """Load a T5X checkpoint from disk, convert it and load it into ``model``.

    (The previous signature declared the same name for all five parameters --
    a SyntaxError -- while the body read ``config`` and ``model``.)
    """
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path )
    converted = convert_tax_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention )
    state_dict = make_state_dict(converted, is_encoder_only )
    # strict=True: every converted key must match a model parameter.
    model.load_state_dict(state_dict, strict=True )
def __lowerCamelCase ( tax_checkpoint_path : str, config_file : str, pytorch_dump_path : str, is_encoder_only : bool = False, scalable_attention : bool = False, ) -> Dict:
    """Convert a T5X checkpoint to a PyTorch checkpoint and save it to disk.

    (The previous signature declared the same name for all five parameters --
    a SyntaxError -- while the body read ``config``, ``model`` and
    ``pytorch_dump_path``.)
    """
    config = MTaConfig.from_json_file(config_file )
    print(f"Building PyTorch model from configuration: {config}" )
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMTaEncoderModel(config )
    else:
        model = UMTaForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention )
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}" )
    model.save_pretrained(pytorch_dump_path )
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path )
    print("""Done""" )
if __name__ == "__main__":
    # CLI for the T5X -> PyTorch conversion (the previous code assigned the
    # parser to an obfuscated name and then referenced the undefined ``parser``).
    parser = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
    # Required parameters
    parser.add_argument(
        '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
    )
    parser.add_argument(
        '--config_file',
        default=None,
        type=str,
        required=True,
        help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    parser.add_argument(
        '--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False
    )
    parser.add_argument(
        '--scalable_attention',
        action='store_true',
        help='Whether the model uses scaled attention (umt5 model)',
        default=False,
    )
    args = parser.parse_args()
    # argparse stores --t5x_checkpoint_path under ``t5x_checkpoint_path``; the
    # previous ``args.tax_checkpoint_path`` attribute did not exist.
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
| 134 | 0 |
'''simple docstring'''
def __lowerCAmelCase ( UpperCamelCase__ ) -> str:
    """Encode a bytes-like object as an uppercase base16 (hex) string.

    Fix: hex each *byte* of the input -- the previous code hexed the whole
    argument on every iteration and never used the loop variable.
    """
    return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(UpperCamelCase__ )] )
def __lowerCAmelCase ( data ) -> bytes:
    """Decode an uppercase base16 (hex) string into bytes, per RFC 3548.

    Raises ValueError for odd-length input or characters outside 0-9A-F.
    (The previous parameter was obfuscated while the return expression still
    read the undefined name ``data``.)
    """
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data ) % 2) != 0:
        raise ValueError(
            '''Base16 encoded data is invalid:
Data does not have an even number of hex digits.''' )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data ) <= set('''0123456789ABCDEF''' ):
        raise ValueError(
            '''Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.''' )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(data ) , 2 ) )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
# Module-level logger for this image-processor file.
__UpperCAmelCase =logging.get_logger(__name__)
class a__ ( UpperCAmelCase__ ):
    """Image processor: resize (optionally via a crop fraction), center-crop,
    rescale and normalize images into a ``pixel_values`` BatchFeature.

    NOTE(review): the obfuscation collapsed every local / attribute assignment
    into the single (name-mangled) name ``__lowerCamelCase`` and duplicated the
    parameter name ``a`` in every signature (a SyntaxError), so the values read
    later (``self.do_resize``, ``images`` ...) are never bound.  The docstrings
    below describe the intended behavior only.
    """

    # Only key produced in the output BatchFeature.
    lowerCamelCase : Dict =["pixel_values"]

    def __init__( self : List[str] , a : bool = True , a : Dict[str, int] = None , a : int = 0.9 , a : PILImageResampling = PILImageResampling.BICUBIC , a : bool = True , a : Dict[str, int] = None , a : Union[int, float] = 1 / 2_55 , a : bool = True , a : bool = True , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , **a : Dict , ):
        """Store the default preprocessing configuration (sizes, crop fraction,
        resampling filter, rescale factor, normalization statistics)."""
        super().__init__(**a )
        __lowerCamelCase = size if size is not None else {'''shortest_edge''': 2_24}
        __lowerCamelCase = get_size_dict(a , default_to_square=a )
        __lowerCamelCase = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
        __lowerCamelCase = get_size_dict(a , param_name='''crop_size''' )
        __lowerCamelCase = do_resize
        __lowerCamelCase = size
        __lowerCamelCase = crop_pct
        __lowerCamelCase = resample
        __lowerCamelCase = do_center_crop
        __lowerCamelCase = crop_size
        __lowerCamelCase = do_rescale
        __lowerCamelCase = rescale_factor
        __lowerCamelCase = do_normalize
        __lowerCamelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        __lowerCamelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def SCREAMING_SNAKE_CASE__ ( self : Any , a : np.ndarray , a : Dict[str, int] , a : Optional[float] = None , a : PILImageResampling = PILImageResampling.BICUBIC , a : Optional[Union[str, ChannelDimension]] = None , **a : List[str] , ):
        """Resize to ``size``; with ``crop_pct`` set, scale up by 1/crop_pct
        first so a subsequent center-crop yields exactly ``size``."""
        __lowerCamelCase = get_size_dict(a , default_to_square=a )
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f"""size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
        if crop_pct is not None:
            if "shortest_edge" in size:
                __lowerCamelCase = int(size['''shortest_edge'''] / crop_pct )
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    __lowerCamelCase = int(size['''height'''] / crop_pct )
                else:
                    __lowerCamelCase = (int(size['''height'''] / crop_pct ), int(size['''width'''] / crop_pct ))
            else:
                raise ValueError('''Invalid size for resize: {}'''.format(a ) )
            __lowerCamelCase = get_resize_output_image_size(a , size=a , default_to_square=a )
        else:
            if "shortest_edge" in size:
                __lowerCamelCase = get_resize_output_image_size(a , size=size['''shortest_edge'''] , default_to_square=a )
            elif "height" in size and "width" in size:
                __lowerCamelCase = (size['''height'''], size['''width'''])
            else:
                raise ValueError('''Invalid size for resize: {}'''.format(a ) )
        return resize(a , size=a , resample=a , data_format=a , **a )

    def SCREAMING_SNAKE_CASE__ ( self : Dict , a : np.ndarray , a : Dict[str, int] , a : Optional[Union[str, ChannelDimension]] = None , **a : Dict , ):
        """Center-crop the image to size["height"] x size["width"]."""
        __lowerCamelCase = get_size_dict(a )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""size must contain 'height' and 'width' as keys. Got {size.keys()}""" )
        return center_crop(a , size=(size['''height'''], size['''width''']) , data_format=a , **a )

    def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , a : np.ndarray , a : Union[int, float] , a : Optional[Union[str, ChannelDimension]] = None , **a : Union[str, Any] , ):
        """Multiply pixel values by ``scale`` (typically 1/255)."""
        return rescale(a , scale=a , data_format=a , **a )

    def SCREAMING_SNAKE_CASE__ ( self : List[str] , a : np.ndarray , a : Union[float, List[float]] , a : Union[float, List[float]] , a : Optional[Union[str, ChannelDimension]] = None , **a : Any , ):
        """Normalize with the given per-channel mean and std."""
        return normalize(a , mean=a , std=a , data_format=a , **a )

    def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , a : ImageInput , a : bool = None , a : Dict[str, int] = None , a : int = None , a : PILImageResampling = None , a : bool = None , a : Dict[str, int] = None , a : bool = None , a : float = None , a : bool = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[str, TensorType]] = None , a : ChannelDimension = ChannelDimension.FIRST , **a : Tuple , ):
        """Apply the configured resize / center-crop / rescale / normalize
        pipeline to one or more images and return a BatchFeature."""
        __lowerCamelCase = do_resize if do_resize is not None else self.do_resize
        __lowerCamelCase = crop_pct if crop_pct is not None else self.crop_pct
        __lowerCamelCase = resample if resample is not None else self.resample
        __lowerCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
        __lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale
        __lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
        __lowerCamelCase = do_normalize if do_normalize is not None else self.do_normalize
        __lowerCamelCase = image_mean if image_mean is not None else self.image_mean
        __lowerCamelCase = image_std if image_std is not None else self.image_std
        __lowerCamelCase = size if size is not None else self.size
        __lowerCamelCase = get_size_dict(a , default_to_square=a )
        __lowerCamelCase = crop_size if crop_size is not None else self.crop_size
        __lowerCamelCase = get_size_dict(a , param_name='''crop_size''' )
        __lowerCamelCase = make_list_of_images(a )
        if not valid_images(a ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None or resample is None:
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
        if do_center_crop and crop_pct is None:
            raise ValueError('''Crop_pct must be specified if do_center_crop is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # All transformations expect numpy arrays.
        __lowerCamelCase = [to_numpy_array(a ) for image in images]
        if do_resize:
            __lowerCamelCase = [self.resize(image=a , size=a , crop_pct=a , resample=a ) for image in images]
        if do_center_crop:
            __lowerCamelCase = [self.center_crop(image=a , size=a ) for image in images]
        if do_rescale:
            __lowerCamelCase = [self.rescale(image=a , scale=a ) for image in images]
        if do_normalize:
            __lowerCamelCase = [self.normalize(image=a , mean=a , std=a ) for image in images]
        __lowerCamelCase = [to_channel_dimension_format(a , a ) for image in images]
        __lowerCamelCase = {'''pixel_values''': images}
        return BatchFeature(data=a , tensor_type=a )
| 237 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
_lowerCamelCase : List[Any] = None
# Module logger.
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
# File names expected in a saved tokenizer directory.
_lowerCamelCase : List[Any] = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
# NOTE(review): these module constants were all obfuscated to ``_lowerCamelCase``;
# each assignment overwrites the previous one, while the class below reads the
# original names (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP, ...), which no
# longer exist.
_lowerCamelCase : Any = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}
# Maximum input sizes per pretrained checkpoint.
_lowerCamelCase : Optional[int] = {
    "camembert-base": 5_1_2,
}
# SentencePiece word-start marker.
_lowerCamelCase : str = "▁"
class __UpperCAmelCase ( UpperCAmelCase_ ):
    """Fast (tokenizers-backed) CamemBERT tokenizer.

    NOTE(review): the obfuscation duplicated the parameter name ``__A`` in
    every signature (a SyntaxError) and reused ``__UpperCAmelCase`` -- which is
    name-mangled inside this class -- for argument placeholders, so the method
    bodies below cannot run as written.  The docstrings describe intent only.
    """

    UpperCamelCase = VOCAB_FILES_NAMES
    UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase = ['input_ids', 'attention_mask']
    # Slow-tokenizer class used when converting back from the fast tokenizer.
    UpperCamelCase = CamembertTokenizer

    def __init__( self : Optional[Any], __A : List[str]=None, __A : Any=None, __A : Tuple="<s>", __A : int="</s>", __A : Union[str, Any]="</s>", __A : Optional[int]="<s>", __A : str="<unk>", __A : int="<pad>", __A : Dict="<mask>", __A : Union[str, Any]=["<s>NOTUSED", "</s>NOTUSED"], **__A : List[Any], ):
        """Wrap the mask token as a left-stripping AddedToken and initialize the fast tokenizer."""
        UpperCAmelCase : str = AddedToken(__UpperCAmelCase, lstrip=__UpperCAmelCase, rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase, __UpperCAmelCase ) else mask_token
        super().__init__(
            __UpperCAmelCase, tokenizer_file=__UpperCAmelCase, bos_token=__UpperCAmelCase, eos_token=__UpperCAmelCase, sep_token=__UpperCAmelCase, cls_token=__UpperCAmelCase, unk_token=__UpperCAmelCase, pad_token=__UpperCAmelCase, mask_token=__UpperCAmelCase, additional_special_tokens=__UpperCAmelCase, **__UpperCAmelCase, )
        UpperCAmelCase : Union[str, Any] = vocab_file
        # Saving a slow tokenizer requires the original sentencepiece file.
        UpperCAmelCase : Any = False if not self.vocab_file else True

    def __magic_name__ ( self : Any, __A : List[int], __A : Optional[List[int]] = None ):
        """Build model inputs with special tokens: <s> A </s> (pair: <s> A </s></s> B </s>)."""
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        UpperCAmelCase : Dict = [self.cls_token_id]
        UpperCAmelCase : Tuple = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def __magic_name__ ( self : Any, __A : List[int], __A : Optional[List[int]] = None ):
        """Return an all-zero token-type-id mask (CamemBERT does not use token types)."""
        UpperCAmelCase : Tuple = [self.sep_token_id]
        UpperCAmelCase : List[str] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def __magic_name__ ( self : Optional[Any], __A : str, __A : Optional[str] = None ):
        """Copy the sentencepiece vocabulary file into ``save_directory``."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(__UpperCAmelCase ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        UpperCAmelCase : Optional[Any] = os.path.join(
            __UpperCAmelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ):
            copyfile(self.vocab_file, __UpperCAmelCase )
        return (out_vocab_file,)
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
# Skipped entirely unless the Tatoeba data directory exists on disk.
# NOTE(review): ``UpperCAmelCase_`` in the decorator and ``__UpperCAmelCase``
# (name-mangled inside the class) do not resolve -- the original constant /
# argument names were destroyed by obfuscation.
@unittest.skipUnless(os.path.exists(UpperCAmelCase_ ) , 'Tatoeba directory does not exist.' )
class A ( unittest.TestCase ):
    @cached_property
    def lowercase_ (self : Optional[int] ) -> List[Any]:
        """Build a TatoebaConverter writing into a fresh temporary directory."""
        UpperCAmelCase__ = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=__UpperCAmelCase )

    @slow
    def lowercase_ (self : List[Any] ) -> Optional[int]:
        """Smoke-test converting the heb-eng model."""
        self.resolver.convert_models(["heb-eng"] )

    @slow
    def lowercase_ (self : Dict ) -> List[str]:
        """Check the generated model card reports the expected language pair."""
        UpperCAmelCase__ , UpperCAmelCase__ = self.resolver.write_model_card("opus-mt-he-en" , dry_run=__UpperCAmelCase )
        assert mmeta["long_pair"] == "heb-eng"
| 65 | 0 |
import argparse
import os
import re
import packaging.version
# Root of the examples tree scanned by the version updater.
lowerCAmelCase_ = '''examples/'''
# NOTE(review): the four module constants below were all obfuscated to the same
# name ``lowerCAmelCase_`` -- each assignment clobbers the previous one, so only
# the last binding (the README filename) survives at import time; the functions
# below read the original names (REPLACE_PATTERNS, REPLACE_FILES, README_FILE).
lowerCAmelCase_ = {
    '''examples''': (re.compile(r'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
    '''init''': (re.compile(r'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
    '''setup''': (re.compile(r'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), r'''\1version="VERSION",'''),
    '''doc''': (re.compile(r'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
lowerCAmelCase_ = {
    '''init''': '''src/diffusers/__init__.py''',
    '''setup''': '''setup.py''',
}
lowerCAmelCase_ = '''README.md'''
def lowerCamelCase_ ( fname , version , pattern ) -> int:
    """Rewrite the version string in ``fname`` using the regex registered for ``pattern``.

    (The previous signature declared the same name for all three parameters --
    a SyntaxError -- and the locals were never bound under the names the body read.)
    """
    with open(fname , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        code = f.read()
    re_pattern , replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace('''VERSION''' , version )
    code = re_pattern.sub(replace , code )
    with open(fname , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        f.write(code )
def lowerCamelCase_ ( version ) -> Any:
    """Update the pinned ``version`` in every maintained example script.

    NOTE(review): ``PATH_TO_EXAMPLES`` and ``update_version_in_file`` were
    obfuscated away at module level -- confirm those bindings are restored.
    """
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES ):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove('''research_projects''' )
        if "legacy" in directories:
            directories.remove('''legacy''' )
        for fname in fnames:
            if fname.endswith('''.py''' ):
                update_version_in_file(os.path.join(folder , fname ) , version , pattern='''examples''' )
def lowerCamelCase_ ( version , patch=False ) -> Dict:
    """Propagate ``version`` to every registered file; skip examples on a patch release."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname , version , pattern )
    if not patch:
        update_version_in_examples(version )
def lowerCamelCase_ ( ) -> Union[str, Any]:
    """Redirect doc links in the README model list from the `main` docs to the stable docs.

    Fix: bind the locals the loop reads and write the ``.replace`` result back
    into ``lines[index]`` (the previous code computed it and threw it away).
    """
    _start_prompt = '''🤗 Transformers currently provides the following architectures'''
    _end_prompt = '''1. Want to contribute a new model?'''
    with open(README_FILE , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt ):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt ):
        if lines[index].startswith('''1.''' ):
            lines[index] = lines[index].replace(
                '''https://huggingface.co/docs/diffusers/main/model_doc''' , '''https://huggingface.co/docs/diffusers/model_doc''' , )
        index += 1
    with open(README_FILE , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        f.writelines(lines )
def lowerCamelCase_ ( ) -> Optional[int]:
    """Read the current package version from the init file and return it parsed."""
    with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
        code = f.read()
    default_version = REPLACE_PATTERNS['''init'''][0].search(code ).groups()[0]
    return packaging.version.parse(default_version )
def lowerCamelCase_ ( patch=False ) -> int:
    """Compute the release version, confirm it with the user and write it everywhere.

    Raises ValueError when asked for a patch release from a dev version.
    """
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
    else:
        default_version = f'''{default_version.major}.{default_version.minor + 1}.0'''
    # Now let's ask nicely if that's the right one.
    version = input(f'''Which version are you releasing? [{default_version}]''' )
    if len(version ) == 0:
        version = default_version
    print(f'''Updating version to {version}.''' )
    global_version_update(version , patch=patch )
def lowerCamelCase_ ( ) -> int:
    """Bump to the next dev version after a release, confirming with the user."""
    current_version = get_version()
    dev_version = f'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f'''Which version are we developing now? [{dev_version}]''' )
    if len(version ) == 0:
        version = dev_version
    print(f'''Updating version to {version}.''' )
    global_version_update(version )
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()
if __name__ == "__main__":
    # CLI: pre-release (default), patch, or post-release version bump.
    # Fix: the parser/args were assigned to an obfuscated module name while the
    # body read the undefined names ``parser`` and ``args``.
    parser = argparse.ArgumentParser()
    parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
    parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print('''Nothing to do after a patch :-)''')
    else:
        post_release_work()
| 368 |
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __lowerCAmelCase ( unittest.TestCase ):
    """Fast unit test for the PNDM pipeline with a tiny dummy UNet.

    NOTE(review): the obfuscated bindings are broken -- values are assigned to
    ``snake_case_`` but the body reads ``model``, ``pndm``, ``image`` etc.,
    which are never bound.  The docstrings describe the intended checks.
    """

    @property
    def lowerCamelCase (self ) -> Union[str, Any]:
        '''Build a small deterministic UNet2DModel for testing.'''
        torch.manual_seed(0 )
        snake_case_ : int = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
        return model

    def lowerCamelCase (self ) -> Dict:
        '''Run 20 PNDM inference steps and compare a fixed output slice (and the
        tuple-return variant) against known values.'''
        snake_case_ : Any = self.dummy_uncond_unet
        snake_case_ : Optional[Any] = PNDMScheduler()
        snake_case_ : Optional[Any] = PNDMPipeline(unet=__magic_name__ , scheduler=__magic_name__ )
        pndm.to(__magic_name__ )
        pndm.set_progress_bar_config(disable=__magic_name__ )
        snake_case_ : str = torch.manual_seed(0 )
        snake_case_ : Dict = pndm(generator=__magic_name__ , num_inference_steps=20 , output_type='''numpy''' ).images
        snake_case_ : str = torch.manual_seed(0 )
        snake_case_ : str = pndm(generator=__magic_name__ , num_inference_steps=20 , output_type='''numpy''' , return_dict=__magic_name__ )[0]
        snake_case_ : Any = image[0, -3:, -3:, -1]
        snake_case_ : str = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        snake_case_ : str = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    """Slow integration test running PNDM with real pretrained CIFAR-10 weights.

    NOTE(review): renamed from an obfuscated name shared with the fast-test
    class above so both classes are discoverable.
    """

    def lowerCamelCase(self):
        model_id = '''google/ddpm-cifar10-32'''
        unet = UNetaDModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type='''numpy''').images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array(
            [0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
"""simple docstring"""
from __future__ import annotations
def lowercase_(maze):
    """Solve the rat-in-a-maze problem for a square 0/1 grid.

    Prints the solution matrix (cells on the path marked 1) when a path from
    (0, 0) to (size-1, size-1) exists, otherwise a failure message.

    Returns:
        bool: True if a path exists.
    """
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print('''\n'''.join(str(row) for row in solutions))
    else:
        print('''No solution exists!''')
    return solved
def run_maze(maze, i, j, solutions):
    """Depth-first search step for the maze solver.

    Tries to extend the current path from cell (i, j) to the bottom-right
    corner, marking visited path cells with 1 in ``solutions`` and resetting
    them to 0 on backtrack.  Named ``run_maze`` to match its call sites.

    Returns:
        bool: True if the destination is reachable from (i, j).
    """
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # Tentatively mark this cell as part of the path.
            solutions[i][j] = 1
            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True
            solutions[i][j] = 0  # backtrack: this cell leads nowhere
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger — the config class below logs from its properties via ``logger``.
logger = logging.get_logger(__name__)
# Model identifier -> hosted config file.  NOTE(review): constant name restored
# per the transformers naming convention — confirm against the original module.
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
class lowercase(PretrainedConfig):
    """Configuration for a Transfo-XL style model.

    Fixes applied: base class restored to the imported ``PretrainedConfig``,
    the duplicated (syntactically invalid) parameter names replaced with real
    names, and attribute assignments restored to ``self.``.
    """

    model_type = """transfo-xl"""
    # NOTE(review): attribute names follow the PretrainedConfig convention
    # (``keys_to_ignore_at_inference`` / ``attribute_map``) — confirm.
    keys_to_ignore_at_inference = ["""mems"""]
    attribute_map = {
        """n_token""": """vocab_size""",
        """hidden_size""": """d_model""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }

    def __init__(
        self,
        vocab_size=26_77_35,
        cutoffs=[2_00_00, 4_00_00, 20_00_00],  # kept mutable for interface compat; copied below
        d_model=10_24,
        d_embed=10_24,
        n_head=16,
        d_head=64,
        d_inner=40_96,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=16_00,
        clamp_len=10_00,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1E-5,
        eos_token_id=0,
        **kwargs,
    ):
        """Store hyper-parameters and forward tokenizer/IO kwargs to the base class."""
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            # Share every projection except the first cluster.
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Transfo-XL has no fixed sequence-length limit; -1 signals "unlimited".
        logger.info(F"""The model {self.model_type} is one of the few models that has no sequence length limit.""")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            F"""The model {self.model_type} is one of the few models that has no sequence length limit.""")
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _snake_case(metaclass=DummyObject):
    """Placeholder that raises a helpful error when ``note_seq`` is not installed.

    Fix: metaclass restored to the imported ``DummyObject`` (was an undefined
    obfuscated name), and the two classmethods given distinct names.
    """

    # DummyObject reads ``_backends`` to build the "missing backend" message.
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        # NOTE(review): original classmethod names were obfuscated; ``from_config``
        # / ``from_pretrained`` follow the stock dummy-object template — confirm.
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__A = logging.get_logger(__name__)
class _snake_case(BaseImageProcessor):
    """Image processor applying resize -> center-crop -> rescale -> normalize.

    Fixes applied: base class restored to the imported ``BaseImageProcessor``,
    duplicated parameter names replaced, assignment targets restored.  Method
    names are grounded by the ``self.resize`` / ``self.center_crop`` /
    ``self.rescale`` / ``self.normalize`` calls in ``preprocess``.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PIL.Image.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image, size, resample=PIL.Image.BICUBIC, data_format=None, **kwargs):
        """Resize ``image`` to ``size`` ({"height": h, "width": w})."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        """Center-crop ``image`` to ``size``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        """Normalize ``image`` channel-wise with ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Run the configured transform chain; per-call arguments override defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        # Parenthesized to fix the original ``and``/``or`` precedence bug.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Module logger — ``save_vocabulary`` in the tokenizer below uses ``logger``.
logger = logging.get_logger(__name__)
# Constant names restored: the tokenizer class below references
# VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, which the obfuscated assignments
# (all bound to one clobbered name) never defined.
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
        '''allenai/longformer-large-4096''': (
            '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
        ),
        '''allenai/longformer-large-4096-finetuned-triviaqa''': (
            '''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
        ),
        '''allenai/longformer-base-4096-extra.pos.embd.only''': (
            '''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
        ),
        '''allenai/longformer-large-4096-extra.pos.embd.only''': (
            '''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
        ),
    },
    '''merges_file''': {
        '''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
        '''allenai/longformer-large-4096''': (
            '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
        ),
        '''allenai/longformer-large-4096-finetuned-triviaqa''': (
            '''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
        ),
        '''allenai/longformer-base-4096-extra.pos.embd.only''': (
            '''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
        ),
        '''allenai/longformer-large-4096-extra.pos.embd.only''': (
            '''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
        ),
    },
}

# Maximum input lengths per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''allenai/longformer-base-4096''': 40_96,
    '''allenai/longformer-large-4096''': 40_96,
    '''allenai/longformer-large-4096-finetuned-triviaqa''': 40_96,
    '''allenai/longformer-base-4096-extra.pos.embd.only''': 40_96,
    '''allenai/longformer-large-4096-extra.pos.embd.only''': 40_96,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Map every byte 0-255 to a printable unicode character.

    Printable ASCII / Latin-1 ranges map to themselves; the remaining bytes
    are shifted to 256+ so no byte maps to whitespace or control characters,
    which the BPE merges cannot handle.  Name restored to match the call site
    in the tokenizer's ``__init__``.
    """
    bs = (
        list(range(ord('!'), ord('~') + 1)) + list(range(ord('¡'), ord('¬') + 1)) + list(range(ord('®'), ord('ÿ') + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(c) for c in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in ``word`` (a tuple of symbols).

    Name restored to match the call sites in ``bpe``.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class SCREAMING_SNAKE_CASE(PreTrainedTokenizer):
    """Byte-level BPE tokenizer (GPT-2 style) for Longformer checkpoints.

    Fixes applied: base class restored to the imported ``PreTrainedTokenizer``,
    duplicated (syntactically invalid) parameter names replaced, assignment
    targets restored to ``self.``.  Method names follow the
    PreTrainedTokenizer contract — the internal ``self.bpe`` call grounds that
    name; the rest follow the standard template (NOTE(review): confirm).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            bpe_merges = merges_handle.read().split('\n')[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+')

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply the learned BPE merges to one pre-tokenized chunk (memoized)."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            # Lowest-ranked (most frequent) mergeable pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Split ``text`` with the GPT-2 regex, then BPE-encode each chunk."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8')
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Invert the byte-level encoding back to a plain string."""
        text = ''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write ``vocab.json`` and ``merges.txt`` into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ' Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1
        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """<s> A </s> for one sequence; <s> A </s></s> B </s> for a pair."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return 1 for special-token positions, 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """All-zero token-type ids (this model does not use token types)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        """Optionally prepend a space so the first word BPE-encodes like mid-sentence words."""
        add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = ' ' + text
        return (text, kwargs)
'''simple docstring'''
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
    from PIL import Image
else:
    class Image:
        # Fallback so the ``Image`` annotations below still resolve without PIL.
        # Fix: original fallback had an obfuscated class name and a duplicated
        # ``*args``/``**kwargs`` parameter name (a SyntaxError).
        @staticmethod
        def open(*args, **kwargs):
            # NOTE(review): method name follows the stock vision-fallback
            # template used across the test suite — confirm.
            pass
def hashimage(image) -> str:
    """Return a short, stable fingerprint of an image: first 10 hex chars of the
    MD5 of its raw bytes.

    Fixes: ``hashlib.mda`` restored to ``hashlib.md5``; name restored to match
    the ``hashimage(...)`` call site below.
    """
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]
def mask_to_test_readable(mask) -> dict:
    """Summarize a mask as ``{"hash": ..., "shape": ...}`` for compact assertions.

    Name restored to match the ``mask_to_test_readable(...)`` call sites in the
    test class below.
    """
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class SCREAMING_SNAKE_CASE(unittest.TestCase):
    """Pipeline tests for ``mask-generation`` (SAM-style automatic masks).

    Fixes applied: duplicated parameter names replaced, lost assignment
    targets restored (``outputs`` / ``new_output``), the mask passed to
    ``mask_to_test_readable`` is the loop element.  NOTE(review): attribute
    and method names were obfuscated; they follow the standard pipeline-test
    template — confirm against the original module.
    """

    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else []))
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []))

    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, image_segmenter, examples):
        # Generic pipeline run is intentionally a no-op for this task.
        pass

    @require_tf
    @unittest.skip('Image segmentation not implemented in TF')
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        image_segmenter = pipeline('mask-generation', model='facebook/sam-vit-huge')
        outputs = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg', points_per_batch=256)
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs['masks']):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
                {'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0444},
                {'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.021},
                {'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0167},
                {'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0132},
                {'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0053},
                {'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.9967},
                {'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.993},
                {'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.9909},
                {'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.9879},
                {'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.9834},
                {'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.9716},
                {'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.9612},
                {'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.9599},
                {'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.9552},
                {'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.9532},
                {'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.9516},
                {'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.9499},
                {'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.9483},
                {'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.9464},
                {'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.943},
                {'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.943},
                {'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.9408},
                {'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.9335},
                {'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.9326},
                {'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.9262},
                {'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.8999},
                {'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.8986},
                {'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.8984},
                {'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.8873},
                {'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8871}
            ], )
        # fmt: on

    @require_torch
    @slow
    def test_threshold(self):
        model_id = 'facebook/sam-vit-huge'
        image_segmenter = pipeline('mask-generation', model=model_id)
        outputs = image_segmenter(
            'http://images.cocodataset.org/val2017/000000039769.jpg', pred_iou_thresh=1, points_per_batch=256)
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs['masks']):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
                {'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0444},
                {'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0210},
                {'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0167},
                {'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0132},
                {'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0053},
            ], )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the Whisper sub-package.  Fix: the obfuscation
# collapsed every assignment onto one name, so each optional-backend list
# CLOBBERED the dict instead of adding a key, and the final _LazyModule call
# referenced an undefined ``_import_structure``.  Keys are grounded by the
# module names imported in the TYPE_CHECKING branch below.
_import_structure = {
    "configuration_whisper": ["WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP", "WhisperConfig", "WhisperOnnxConfig"],
    "feature_extraction_whisper": ["WhisperFeatureExtractor"],
    "processing_whisper": ["WhisperProcessor"],
    "tokenization_whisper": ["WhisperTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_whisper"] = [
        "WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WhisperForConditionalGeneration",
        "WhisperModel",
        "WhisperPreTrainedModel",
        "WhisperForAudioClassification",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_whisper"] = [
        "TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWhisperForConditionalGeneration",
        "TFWhisperModel",
        "TFWhisperPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_whisper"] = [
        "FlaxWhisperForConditionalGeneration",
        "FlaxWhisperModel",
        "FlaxWhisperPreTrainedModel",
        "FlaxWhisperForAudioClassification",
    ]

if TYPE_CHECKING:
    from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
    from .feature_extraction_whisper import WhisperFeatureExtractor
    from .processing_whisper import WhisperProcessor
    from .tokenization_whisper import WhisperTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_whisper_fast import WhisperTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_whisper import (
            WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            WhisperForAudioClassification,
            WhisperForConditionalGeneration,
            WhisperModel,
            WhisperPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_whisper import (
            TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWhisperForConditionalGeneration,
            TFWhisperModel,
            TFWhisperPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_whisper import (
            FlaxWhisperForAudioClassification,
            FlaxWhisperForConditionalGeneration,
            FlaxWhisperModel,
            FlaxWhisperPreTrainedModel,
        )
else:
    import sys

    # NOTE(review): replacing the module in sys.modules follows the standard
    # transformers lazy-module template — confirm against the original file.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
def _A ( _a : int ):
"""simple docstring"""
A = abs(_a )
A = 0
while n > 0:
res += n % 1_0
n //= 1_0
return res
def _A ( _a : int ):
"""simple docstring"""
A = abs(_a )
return n if n < 1_0 else n % 1_0 + sum_of_digits(n // 1_0 )
def _A ( _a : int ):
"""simple docstring"""
return sum(int(_a ) for c in str(abs(_a ) ) )
def _A ( ):
    """Benchmark each digit-sum implementation on three sample values.

    Uses ``timeit`` against ``__main__`` attributes, so the benchmarked
    functions must be importable from the running script.
    """
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        # Fixed: the original declared the parameter name `_a` twice (a
        # SyntaxError) while the body read `func` and `value`.
        call = f'{func.__name__}({value})'
        timing = timeit(f'__main__.{call}', setup="""import __main__""")
        print(f'{call:56} = {func(value)} -- {timing:.4f} seconds')

    # NOTE(review): these three names are not defined in this module as shown —
    # the implementations above were all renamed to `_A`.  Restore the original
    # function names (or these references) before running the benchmark.
    for value in (262144, 1125899906842624, 126765060022822940149670320537376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): the original called an undefined `benchmark()`; at this
    # point `_A` is bound to the benchmark driver defined last above.
    _A()
| 77 | 0 |
"""simple docstring"""
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class _lowerCAmelCase :
    """Pure-Python SHA-1 over a ``bytes`` message (mirrors ``hashlib.sha1``).

    Rewritten from the degraded original, in which every method had been
    renamed to ``snake_case`` (each definition shadowing the previous one)
    while the call sites still used ``padding`` / ``split_blocks`` /
    ``expand_block`` / ``rotate`` / ``final_hash``, and attribute assignments
    had been turned into dead local bindings.
    """

    def __init__(self, data):
        # Message bytes plus the five 32-bit initial state words (FIPS 180-1).
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        """Rotate the 32-bit integer ``n`` left by ``b`` bits."""
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        """Append the 0x80 marker, zero padding and the 64-bit bit length."""
        padding = b'\x80' + b'\x00' * (63 - (len(self.data) + 8) % 64)
        return self.data + padding + struct.pack('>Q', 8 * len(self.data))

    def split_blocks(self):
        """Split the padded message into 64-byte blocks."""
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        """Expand one 64-byte block into the 80-word message schedule."""
        w = list(struct.unpack('>16L', block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        """Run the compression loop over every block; return the hex digest."""
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                # Round-dependent boolean function f and round constant k.
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                else:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


# Public alias matching the name the module-level test/CLI functions call.
SHAaHash = _lowerCAmelCase
def __A () -> None:
    """Self-test: the pure-Python digest must match ``hashlib.sha1``.

    Fixes the degraded body, which referenced an undefined
    ``_SCREAMING_SNAKE_CASE`` and the non-existent ``hashlib.shaa``.
    """
    test_string = b'Test String'
    # S324: sha1 is acceptable here — it only cross-checks the toy implementation.
    assert SHAaHash(test_string).final_hash() == hashlib.sha1(test_string).hexdigest()  # noqa: S324
def __A () -> None:
    """CLI entry point: SHA-1-hash a literal string or a file's bytes.

    Fixes the degraded body, whose locals (``args``, ``input_string``,
    ``hash_input``) were never bound and which referenced an undefined
    ``_SCREAMING_SNAKE_CASE``.
    """
    parser = argparse.ArgumentParser(description='Process some strings or files')
    parser.add_argument(
        '--string', dest='input_string', default='Hello World!! Welcome to Cryptography', help='Hash the string')
    parser.add_argument('--file', dest='input_file', help='Hash contents of a file')
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, 'rb') as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, 'utf-8')
    print(SHAaHash(hash_input).final_hash())
if __name__ == "__main__":
    # NOTE(review): the original called an undefined `main()`; at this point
    # `__A` is bound to the CLI entry point defined last above.
    __A()
    import doctest

    doctest.testmod()
| 293 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowerCAmelCase ( a , a , unittest.TestCase ):
    # Fast (CPU-sized) tests for StableDiffusionXLImgaImgPipeline.
    # NOTE(review): the base classes `a , a` look mangled — upstream these are
    # the pipeline tester mixins imported above.  Throughout this class local
    # bindings are also mangled: assignments rebind `lowerCAmelCase__` while
    # later code reads the intended names (`unet`, `scheduler`, `image`, ...).
    # Code is left byte-identical; only comments were added.
    """simple docstring"""

    __magic_name__ :Tuple = StableDiffusionXLImgaImgPipeline
    __magic_name__ :List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
    __magic_name__ :Optional[Any] = PipelineTesterMixin.required_optional_params - {"""latents"""}
    __magic_name__ :Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    __magic_name__ :str = IMAGE_TO_IMAGE_IMAGE_PARAMS
    __magic_name__ :Any = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def snake_case ( self ):
        # Build a tiny SDXL component set (unet/scheduler/vae/dual text encoders)
        # with fixed seeds so outputs are deterministic.
        '''simple docstring'''
        torch.manual_seed(0 )
        lowerCAmelCase__ :Optional[Any] = UNetaDConditionModel(
            block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , attention_head_dim=(2, 4) , use_linear_projection=__UpperCAmelCase , addition_embed_type='text_time' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=8_0 , cross_attention_dim=6_4 , )
        lowerCAmelCase__ :str = EulerDiscreteScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , steps_offset=1 , beta_schedule='scaled_linear' , timestep_spacing='leading' , )
        torch.manual_seed(0 )
        lowerCAmelCase__ :str = AutoencoderKL(
            block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , )
        torch.manual_seed(0 )
        lowerCAmelCase__ :str = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=3_2 , )
        lowerCAmelCase__ :int = CLIPTextModel(__UpperCAmelCase )
        lowerCAmelCase__ :Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=__UpperCAmelCase )
        lowerCAmelCase__ :Any = CLIPTextModelWithProjection(__UpperCAmelCase )
        lowerCAmelCase__ :Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=__UpperCAmelCase )
        lowerCAmelCase__ :str = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'text_encoder_2': text_encoder_a,
            'tokenizer_2': tokenizer_a,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components

    def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=0 ):
        # Deterministic dummy inputs: a seeded 32x32 image plus prompt/config.
        '''simple docstring'''
        lowerCAmelCase__ :Dict = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
        lowerCAmelCase__ :Optional[int] = image / 2 + 0.5
        if str(__UpperCAmelCase ).startswith('mps' ):
            lowerCAmelCase__ :Optional[int] = torch.manual_seed(__UpperCAmelCase )
        else:
            lowerCAmelCase__ :Optional[Any] = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
        lowerCAmelCase__ :Dict = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 5.0,
            'output_type': 'numpy',
            'strength': 0.75,
        }
        return inputs

    def snake_case ( self ):
        # End-to-end smoke test against a pinned output slice.
        '''simple docstring'''
        lowerCAmelCase__ :List[Any] = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        lowerCAmelCase__ :int = self.get_dummy_components()
        lowerCAmelCase__ :List[str] = StableDiffusionXLImgaImgPipeline(**__UpperCAmelCase )
        lowerCAmelCase__ :str = sd_pipe.to(__UpperCAmelCase )
        sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        lowerCAmelCase__ :str = self.get_dummy_inputs(__UpperCAmelCase )
        lowerCAmelCase__ :int = sd_pipe(**__UpperCAmelCase ).images
        lowerCAmelCase__ :int = image[0, -3:, -3:, -1]
        assert image.shape == (1, 3_2, 3_2, 3)
        lowerCAmelCase__ :List[str] = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def snake_case ( self ):
        '''simple docstring'''
        super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )

    def snake_case ( self ):
        '''simple docstring'''
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )

    def snake_case ( self ):
        # Intentionally a no-op override of a mixin test.
        '''simple docstring'''
        pass

    def snake_case ( self ):
        # Prompt-embeds path must match the plain-prompt path.
        '''simple docstring'''
        lowerCAmelCase__ :Tuple = self.get_dummy_components()
        lowerCAmelCase__ :str = StableDiffusionXLImgaImgPipeline(**__UpperCAmelCase )
        lowerCAmelCase__ :str = sd_pipe.to(__UpperCAmelCase )
        # NOTE(review): `sd_pipe.to(...)` is called twice in a row here.
        lowerCAmelCase__ :List[str] = sd_pipe.to(__UpperCAmelCase )
        sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        # forward without prompt embeds
        lowerCAmelCase__ :int = self.get_dummy_inputs(__UpperCAmelCase )
        lowerCAmelCase__ :Optional[int] = 3 * ['this is a negative prompt']
        lowerCAmelCase__ :Tuple = negative_prompt
        lowerCAmelCase__ :str = 3 * [inputs['prompt']]
        lowerCAmelCase__ :Optional[Any] = sd_pipe(**__UpperCAmelCase )
        lowerCAmelCase__ :List[Any] = output.images[0, -3:, -3:, -1]
        # forward with prompt embeds
        lowerCAmelCase__ :Optional[Any] = self.get_dummy_inputs(__UpperCAmelCase )
        lowerCAmelCase__ :Tuple = 3 * ['this is a negative prompt']
        lowerCAmelCase__ :str = 3 * [inputs.pop('prompt' )]
        (
            (
                lowerCAmelCase__
            ) , (
                lowerCAmelCase__
            ) , (
                lowerCAmelCase__
            ) , (
                lowerCAmelCase__
            ) ,
        ) :List[str] = sd_pipe.encode_prompt(__UpperCAmelCase , negative_prompt=__UpperCAmelCase )
        lowerCAmelCase__ :str = sd_pipe(
            **__UpperCAmelCase , prompt_embeds=__UpperCAmelCase , negative_prompt_embeds=__UpperCAmelCase , pooled_prompt_embeds=__UpperCAmelCase , negative_pooled_prompt_embeds=__UpperCAmelCase , )
        lowerCAmelCase__ :Optional[Any] = output.images[0, -3:, -3:, -1]
        # make sure that it's equal
        assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
    # Slow integration tests (real checkpoint, GPU required).
    # NOTE(review): local bindings are mangled (`lowerCAmelCase__` rebound while
    # later code reads `generator`, `latents`, `pipe`, ...), and `get_inputs`
    # repeats the parameter name `__UpperCAmelCase` (a SyntaxError upstream had
    # distinct names). Code left byte-identical; comments only.
    """simple docstring"""

    def snake_case ( self ):
        # Free GPU memory between tests.
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase="cpu" , __UpperCAmelCase=torch.floataa , __UpperCAmelCase=0 ):
        # Deterministic latents + prompt for the integration run.
        '''simple docstring'''
        lowerCAmelCase__ :Any = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
        lowerCAmelCase__ :Dict = np.random.RandomState(__UpperCAmelCase ).standard_normal((1, 4, 6_4, 6_4) )
        lowerCAmelCase__ :Optional[int] = torch.from_numpy(__UpperCAmelCase ).to(device=__UpperCAmelCase , dtype=__UpperCAmelCase )
        lowerCAmelCase__ :int = {
            'prompt': 'a photograph of an astronaut riding a horse',
            'latents': latents,
            'generator': generator,
            'num_inference_steps': 3,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }
        return inputs

    def snake_case ( self ):
        # Full-pipeline run against a pinned output slice.
        '''simple docstring'''
        lowerCAmelCase__ :List[Any] = DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base' )
        pipe.to(__UpperCAmelCase )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        lowerCAmelCase__ :Tuple = self.get_inputs(__UpperCAmelCase )
        lowerCAmelCase__ :int = pipe(**__UpperCAmelCase ).images
        lowerCAmelCase__ :Union[str, Any] = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        lowerCAmelCase__ :List[str] = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] )
        assert np.abs(image_slice - expected_slice ).max() < 7E-3
| 293 | 1 |
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
# Optional torch probe: record availability without failing at import time.
try:
    import torch

    SCREAMING_SNAKE_CASE__ : Any = True
except ImportError:
    SCREAMING_SNAKE_CASE__ : str = False
# NOTE(review): every assignment below rebinds the same
# `SCREAMING_SNAKE_CASE__` name; upstream each was a distinct module constant
# (_torch_available, torch_cache_home, PATH, CONFIG, OBJECTS, ATTRIBUTES,
# TRANSFORMERS_CACHE, WEIGHTS_NAME, ...) and later code still refers to those
# original names — these bindings look mangled.
try:
    from torch.hub import _get_torch_home

    SCREAMING_SNAKE_CASE__ : Optional[Any] = _get_torch_home()
except ImportError:
    SCREAMING_SNAKE_CASE__ : List[Any] = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = os.path.join(torch_cache_home, "transformers")
SCREAMING_SNAKE_CASE__ : List[Any] = "https://cdn.huggingface.co"
SCREAMING_SNAKE_CASE__ : Dict = "https://s3.amazonaws.com/models.huggingface.co/bert"
SCREAMING_SNAKE_CASE__ : List[str] = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
SCREAMING_SNAKE_CASE__ : Tuple = os.path.join(PATH, "config.yaml")
SCREAMING_SNAKE_CASE__ : Optional[Any] = os.path.join(PATH, "attributes.txt")
SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.join(PATH, "objects.txt")
SCREAMING_SNAKE_CASE__ : Dict = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
SCREAMING_SNAKE_CASE__ : Tuple = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
SCREAMING_SNAKE_CASE__ : Dict = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
SCREAMING_SNAKE_CASE__ : Tuple = "pytorch_model.bin"
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "config.yaml"
def __magic_name__ ( objs=OBJECTS , attrs=ATTRIBUTES ) -> Any:
    """Load the Visual-Genome class and attribute label lists.

    Each input file holds one label per line; only the text before the first
    comma is kept, lower-cased and stripped.  Returns ``(classes, attrs)``.

    NOTE(review): the original signature declared the same parameter name
    twice (a SyntaxError) and its locals were mangled; parameters are renamed
    after the file arguments the body reads.
    """
    vg_classes = []
    with open(objs) as f:
        for line in f.readlines():
            vg_classes.append(line.split(",")[0].lower().strip())
    vg_attrs = []
    with open(attrs) as f:
        for line in f.readlines():
            vg_attrs.append(line.split(",")[0].lower().strip())
    return vg_classes, vg_attrs
def __magic_name__ ( __lowerCAmelCase : List[Any] ) -> Tuple:
__lowerCamelCase = OrderedDict()
with open(__lowerCAmelCase , '''rb''' ) as f:
__lowerCamelCase = pkl.load(__lowerCAmelCase )['''model''']
for k in copy.deepcopy(list(ckp.keys() ) ):
__lowerCamelCase = ckp.pop(__lowerCAmelCase )
if isinstance(__lowerCAmelCase , np.ndarray ):
__lowerCamelCase = torch.tensor(__lowerCAmelCase )
else:
assert isinstance(__lowerCAmelCase , torch.tensor ), type(__lowerCAmelCase )
__lowerCamelCase = v
return r
class lowerCAmelCase__ :
    # Hierarchical configuration node: wraps a (possibly nested) dict, exposes
    # keys as attributes, and supports dotted-key assignment plus YAML/JSON
    # (de)serialization.
    # NOTE(review): bindings throughout this class are mangled — assignments
    # rebind `__lowerCamelCase` while later code reads the intended names
    # (`self._name`, `self._pointer`, `levels`, `pointer`, `data`, ...), and
    # several signatures repeat a parameter name (a SyntaxError).  Code is
    # left byte-identical; only comments were added.
    a__ : Optional[Any] = {}

    def __init__( self , SCREAMING_SNAKE_CASE__ : dict , SCREAMING_SNAKE_CASE__ : str = "root" , SCREAMING_SNAKE_CASE__ : Tuple=0 ) -> Optional[Any]:
        # Recursively wrap nested dicts as child Config nodes and mirror every
        # key as an attribute on self.
        __lowerCamelCase = name
        __lowerCamelCase = level
        __lowerCamelCase = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            __lowerCamelCase = copy.deepcopy(SCREAMING_SNAKE_CASE__ )
            __lowerCamelCase = copy.deepcopy(SCREAMING_SNAKE_CASE__ )
            if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
                __lowerCamelCase = Config(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__ , level=level + 1 )
            __lowerCamelCase = v
            setattr(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
        __lowerCamelCase = d

    def __repr__( self : int ) -> Union[str, Any]:
        # Show only the top-level keys.
        return str(list((self._pointer.keys()) ) )

    def __setattr__( self : Tuple , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[str]:
        # Dotted keys ("a.b.c") are propagated into the nested Config tree and
        # mirrored into the backing `_pointer` dict.
        __lowerCamelCase = val
        __lowerCamelCase = val
        __lowerCamelCase = key.split('''.''' )
        __lowerCamelCase = len(SCREAMING_SNAKE_CASE__ ) - 1
        __lowerCamelCase = self._pointer
        if len(SCREAMING_SNAKE_CASE__ ) > 1:
            for i, l in enumerate(SCREAMING_SNAKE_CASE__ ):
                if hasattr(self , SCREAMING_SNAKE_CASE__ ) and isinstance(getattr(self , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ):
                    setattr(getattr(self , SCREAMING_SNAKE_CASE__ ) , '''.'''.join(levels[i:] ) , SCREAMING_SNAKE_CASE__ )
                if l == last_level:
                    __lowerCamelCase = val
                else:
                    __lowerCamelCase = pointer[l]

    def __A ( self : List[str] ) -> Dict:
        # Accessor for the backing dict.
        return self._pointer

    def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Any:
        # Serialize `data` to a YAML file.
        with open(f'''{file_name}''' , '''w''' ) as stream:
            dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )

    def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str ) -> Dict:
        # Serialize `data` to a JSON file.
        with open(f'''{file_name}''' , '''w''' ) as stream:
            json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )

    @staticmethod
    def __A ( SCREAMING_SNAKE_CASE__ : List[Any] ) -> Union[str, Any]:
        # Parse a YAML config file.
        with open(SCREAMING_SNAKE_CASE__ ) as stream:
            __lowerCamelCase = load(SCREAMING_SNAKE_CASE__ , Loader=SCREAMING_SNAKE_CASE__ )
        return data

    def __str__( self : int ) -> Any:
        # Pretty-print the tree with 4-space indentation per nesting level.
        __lowerCamelCase = ''' '''
        if self._name != "root":
            __lowerCamelCase = f'''{t * (self._level-1)}{self._name}:\n'''
        else:
            __lowerCamelCase = ''''''
        __lowerCamelCase = self._level
        for i, (k, v) in enumerate(self._pointer.items() ):
            if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
                r += f'''{t * (self._level)}{v}\n'''
                self._level += 1
            else:
                r += f'''{t * (self._level)}{k}: {v} ({type(SCREAMING_SNAKE_CASE__ ).__name__})\n'''
            __lowerCamelCase = level
        return r[:-1]

    @classmethod
    def __A ( cls : List[str] , SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : str ) -> Any:
        # Alternate constructor: resolve + parse the config, then build a node.
        __lowerCamelCase , __lowerCamelCase = cls.get_config_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
        return cls(SCREAMING_SNAKE_CASE__ )

    @classmethod
    def __A ( cls : str , SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : Tuple ) -> Dict:
        # Resolve a model id / path / URL to a local YAML file (downloading and
        # caching when remote) and return the parsed config plus leftover kwargs.
        __lowerCamelCase = kwargs.pop('''cache_dir''' , SCREAMING_SNAKE_CASE__ )
        __lowerCamelCase = kwargs.pop('''force_download''' , SCREAMING_SNAKE_CASE__ )
        __lowerCamelCase = kwargs.pop('''resume_download''' , SCREAMING_SNAKE_CASE__ )
        __lowerCamelCase = kwargs.pop('''proxies''' , SCREAMING_SNAKE_CASE__ )
        __lowerCamelCase = kwargs.pop('''local_files_only''' , SCREAMING_SNAKE_CASE__ )
        if os.path.isdir(SCREAMING_SNAKE_CASE__ ):
            __lowerCamelCase = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
        elif os.path.isfile(SCREAMING_SNAKE_CASE__ ) or is_remote_url(SCREAMING_SNAKE_CASE__ ):
            __lowerCamelCase = pretrained_model_name_or_path
        else:
            __lowerCamelCase = hf_bucket_url(SCREAMING_SNAKE_CASE__ , filename=SCREAMING_SNAKE_CASE__ , use_cdn=SCREAMING_SNAKE_CASE__ )
        try:
            # Load from URL or cache if already cached
            __lowerCamelCase = cached_path(
                SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , force_download=SCREAMING_SNAKE_CASE__ , proxies=SCREAMING_SNAKE_CASE__ , resume_download=SCREAMING_SNAKE_CASE__ , local_files_only=SCREAMING_SNAKE_CASE__ , )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            __lowerCamelCase = Config.load_yaml(SCREAMING_SNAKE_CASE__ )
        except EnvironmentError:
            __lowerCamelCase = '''Can\'t load config for'''
            raise EnvironmentError(SCREAMING_SNAKE_CASE__ )
        if resolved_config_file == config_file:
            print('''loading configuration file from path''' )
        else:
            print('''loading configuration file cache''' )
        return Config.load_yaml(SCREAMING_SNAKE_CASE__ ), kwargs
def __magic_name__ ( __lowerCAmelCase : List[Any] ) -> Tuple:
    # Debug helper: compare `in_tensor` against a tensor previously dumped to
    # "dump.pt"; prints both, asserts element-wise closeness, then raises on
    # purpose so the surrounding run stops ("tensors are all good").
    # NOTE(review): bindings are mangled — every assignment rebinds
    # `__lowerCamelCase` while the body reads `in_tensor` / `out_tensor` /
    # `na`; upstream these were distinct locals (n1/n2).  Code left byte-identical.
    __lowerCamelCase = torch.load('''dump.pt''' , map_location=in_tensor.device )
    __lowerCamelCase = in_tensor.numpy()
    __lowerCamelCase = out_tensor.numpy()[0]
    print(na.shape , na[0, 0, :5] )
    print(na.shape , na[0, 0, :5] )
    assert np.allclose(__lowerCAmelCase , __lowerCAmelCase , rtol=0.01 , atol=0.1 ), (
        f'''{sum([1 for x in np.isclose(__lowerCAmelCase , __lowerCAmelCase , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %'''
        " element-wise mismatch"
    )
    raise Exception('''tensors are all good''' )
# Hugging face functions below
def __magic_name__ ( __lowerCAmelCase : List[str] ) -> Union[str, Any]:
__lowerCamelCase = urlparse(__lowerCAmelCase )
return parsed.scheme in ("http", "https")
def __magic_name__ ( model_id , filename , use_cdn=True ) -> str:
    """Build the download URL for ``filename`` of ``model_id``.

    Legacy (un-namespaced) model ids map to ``{endpoint}/{id}-{filename}``;
    namespaced ids ("org/name") map to ``{endpoint}/{id}/{filename}``.

    NOTE(review): the original signature repeated a parameter name (a
    SyntaxError) and both f-strings contained the literal ``(unknown)``
    where the filename belongs — both reconstructed here.
    """
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
def __magic_name__ ( url , temp_file , proxies=None , resume_size=0 , user_agent=None , ) -> Optional[Any]:
    """Stream ``url`` into ``temp_file``, resuming from byte ``resume_size``.

    Builds a descriptive User-Agent, sends a Range header when resuming,
    and updates a tqdm progress bar as chunks arrive.  Returns early on
    HTTP 416 (requested range not satisfiable).

    NOTE(review): reconstructed from a degraded body whose parameter names
    were duplicated (a SyntaxError) and whose keyword values were mangled.
    """
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B", unit_scale=True, total=total, initial=resume_size, desc="Downloading", )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def __magic_name__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : Any=False , __lowerCAmelCase : str=None , __lowerCAmelCase : Optional[Any]=10 , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Union[str, Any]=None , __lowerCAmelCase : Union[str, Any]=False , ) -> Dict:
    # Resolve `url` to a local cache path, downloading it (with optional
    # resume) under a file lock when missing or `force_download` is set.
    # NOTE(review): the signature repeats `__lowerCAmelCase` (a SyntaxError;
    # upstream parameters were url/cache_dir/force_download/proxies/
    # etag_timeout/resume_download/user_agent/local_files_only), and locals
    # are mangled (`__lowerCamelCase` rebound while the body reads `cache_dir`,
    # `etag`, `cache_path`, ...). Code left byte-identical; comments only.
    if cache_dir is None:
        __lowerCamelCase = TRANSFORMERS_CACHE
    if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
        __lowerCamelCase = str(__lowerCAmelCase )
    os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
    __lowerCamelCase = None
    if not local_files_only:
        # Probe the remote ETag; failures simply leave etag as None.
        try:
            __lowerCamelCase = requests.head(__lowerCAmelCase , allow_redirects=__lowerCAmelCase , proxies=__lowerCAmelCase , timeout=__lowerCAmelCase )
            if response.status_code == 200:
                __lowerCamelCase = response.headers.get('''ETag''' )
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass
    __lowerCamelCase = url_to_filename(__lowerCAmelCase , __lowerCAmelCase )
    # get cache path to put the file
    __lowerCamelCase = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(__lowerCAmelCase ):
            return cache_path
        else:
            __lowerCamelCase = [
                file
                for file in fnmatch.filter(os.listdir(__lowerCAmelCase ) , filename + '''.*''' )
                if not file.endswith('''.json''' ) and not file.endswith('''.lock''' )
            ]
            if len(__lowerCAmelCase ) > 0:
                return os.path.join(__lowerCAmelCase , matching_files[-1] )
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        '''Cannot find the requested files in the cached path and outgoing traffic has been'''
                        ''' disabled. To enable model look-ups and downloads online, set \'local_files_only\''''
                        ''' to False.''' )
                return None
    # From now on, etag is not None.
    if os.path.exists(__lowerCAmelCase ) and not force_download:
        return cache_path
    # Prevent parallel downloads of the same file with a lock.
    __lowerCamelCase = cache_path + '''.lock'''
    with FileLock(__lowerCAmelCase ):
        # If the download just completed while the lock was activated.
        if os.path.exists(__lowerCAmelCase ) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path
        if resume_download:
            # Resume into a stable ".incomplete" sidecar file.
            __lowerCamelCase = cache_path + '''.incomplete'''

            @contextmanager
            def _resumable_file_manager():
                with open(__lowerCAmelCase , '''a+b''' ) as f:
                    yield f

            __lowerCamelCase = _resumable_file_manager
            if os.path.exists(__lowerCAmelCase ):
                __lowerCamelCase = os.stat(__lowerCAmelCase ).st_size
            else:
                __lowerCamelCase = 0
        else:
            __lowerCamelCase = partial(tempfile.NamedTemporaryFile , dir=__lowerCAmelCase , delete=__lowerCAmelCase )
            __lowerCamelCase = 0
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                '''%s not found in cache or force_download set to True, downloading to %s''' , __lowerCAmelCase , temp_file.name , )
            http_get(
                __lowerCAmelCase , __lowerCAmelCase , proxies=__lowerCAmelCase , resume_size=__lowerCAmelCase , user_agent=__lowerCAmelCase , )
        os.replace(temp_file.name , __lowerCAmelCase )
        # Record url + etag next to the cached file for later validation.
        __lowerCamelCase = {'''url''': url, '''etag''': etag}
        __lowerCamelCase = cache_path + '''.json'''
        with open(__lowerCAmelCase , '''w''' ) as meta_file:
            json.dump(__lowerCAmelCase , __lowerCAmelCase )
    return cache_path
def __magic_name__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : List[Any]=None ) -> int:
__lowerCamelCase = url.encode('''utf-8''' )
__lowerCamelCase = shaaaa(__lowerCAmelCase )
__lowerCamelCase = url_hash.hexdigest()
if etag:
__lowerCamelCase = etag.encode('''utf-8''' )
__lowerCamelCase = shaaaa(__lowerCAmelCase )
filename += "." + etag_hash.hexdigest()
if url.endswith('''.h5''' ):
filename += ".h5"
return filename
def __magic_name__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : List[Any]=False , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Dict=False , __lowerCAmelCase : Dict=False , ) -> List[str]:
    # Resolve a URL or local path to a usable local file, optionally
    # extracting zip/tar archives into a sibling "-extracted" directory.
    # NOTE(review): the signature repeats `__lowerCAmelCase` (a SyntaxError;
    # upstream parameters were url_or_filename/cache_dir/force_download/
    # proxies/resume_download/user_agent/extract_compressed_file/
    # force_extract/local_files_only), and locals are mangled. Code left
    # byte-identical; comments only.
    if cache_dir is None:
        __lowerCamelCase = TRANSFORMERS_CACHE
    if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
        __lowerCamelCase = str(__lowerCAmelCase )
    if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
        __lowerCamelCase = str(__lowerCAmelCase )
    if is_remote_url(__lowerCAmelCase ):
        # URL, so get it from the cache (downloading if necessary)
        __lowerCamelCase = get_from_cache(
            __lowerCAmelCase , cache_dir=__lowerCAmelCase , force_download=__lowerCAmelCase , proxies=__lowerCAmelCase , resume_download=__lowerCAmelCase , user_agent=__lowerCAmelCase , local_files_only=__lowerCAmelCase , )
    elif os.path.exists(__lowerCAmelCase ):
        # File, and it exists.
        __lowerCamelCase = url_or_filename
    elif urlparse(__lowerCAmelCase ).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError('''file {} not found'''.format(__lowerCAmelCase ) )
    else:
        # Something unknown
        raise ValueError('''unable to parse {} as a URL or as a local path'''.format(__lowerCAmelCase ) )
    if extract_compressed_file:
        if not is_zipfile(__lowerCAmelCase ) and not tarfile.is_tarfile(__lowerCAmelCase ):
            return output_path
        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        __lowerCamelCase , __lowerCamelCase = os.path.split(__lowerCAmelCase )
        __lowerCamelCase = output_file.replace('''.''' , '''-''' ) + '''-extracted'''
        __lowerCamelCase = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
        if os.path.isdir(__lowerCAmelCase ) and os.listdir(__lowerCAmelCase ) and not force_extract:
            return output_path_extracted
        # Prevent parallel extractions
        __lowerCamelCase = output_path + '''.lock'''
        with FileLock(__lowerCAmelCase ):
            shutil.rmtree(__lowerCAmelCase , ignore_errors=__lowerCAmelCase )
            os.makedirs(__lowerCAmelCase )
            if is_zipfile(__lowerCAmelCase ):
                with ZipFile(__lowerCAmelCase , '''r''' ) as zip_file:
                    zip_file.extractall(__lowerCAmelCase )
                    zip_file.close()
            elif tarfile.is_tarfile(__lowerCAmelCase ):
                __lowerCamelCase = tarfile.open(__lowerCAmelCase )
                tar_file.extractall(__lowerCAmelCase )
                tar_file.close()
            else:
                raise EnvironmentError('''Archive format of {} could not be identified'''.format(__lowerCAmelCase ) )
        return output_path_extracted
    return output_path
def __magic_name__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any]="," ) -> Any:
assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
if os.path.isfile(__lowerCAmelCase ):
with open(__lowerCAmelCase ) as f:
__lowerCamelCase = eval(f.read() )
else:
__lowerCamelCase = requests.get(__lowerCAmelCase )
try:
__lowerCamelCase = requests.json()
except Exception:
__lowerCamelCase = req.content.decode()
assert data is not None, "could not connect"
try:
__lowerCamelCase = eval(__lowerCAmelCase )
except Exception:
__lowerCamelCase = data.split('''\n''' )
req.close()
return data
def __magic_name__ ( url ) -> List[str]:
    """Download the image at ``url`` and return it as a numpy array.

    Fixes the degraded original, whose locals (``response``, ``img``) were
    never bound.
    """
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img
def __magic_name__ ( __lowerCAmelCase : List[Any] ) -> str:
__lowerCamelCase = url.split('''/''' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(__lowerCAmelCase )
with open(__lowerCAmelCase , '''rb''' ) as stream:
__lowerCamelCase = pkl.load(__lowerCAmelCase )
__lowerCamelCase = weights.pop('''model''' )
__lowerCamelCase = {}
for k, v in model.items():
__lowerCamelCase = torch.from_numpy(__lowerCAmelCase )
if "running_var" in k:
__lowerCamelCase = torch.tensor([0] )
__lowerCamelCase = k.replace('''running_var''' , '''num_batches_tracked''' )
__lowerCamelCase = zero
return new
def __magic_name__ ( ) -> Any:
print(f'''{os.path.abspath(os.path.join(__lowerCAmelCase , os.pardir ) )}/demo.ipynb''' )
def __magic_name__ ( im , input_format="RGB" ) -> str:
    """Read an image from a local path (cv2) or a URL and normalize channels.

    cv2 loads BGR; after ``cvtColor`` the data is RGB, and when
    ``input_format == "RGB"`` the channel axis is reversed again (preserving
    the original behavior exactly).

    Fixes the degraded original: duplicated parameter names (a SyntaxError)
    and locals that were never bound (``img``).
    """
    assert isinstance(im, str)
    if os.path.isfile(im):
        img = cva.imread(im)
    else:
        img = get_image_from_url(im)
    assert img is not None, f"could not connect to: {im}"
    img = cva.cvtColor(img, cva.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img
def __magic_name__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Tuple=1 ) -> Optional[int]:
return (images[i : i + batch] for i in range(0 , len(__lowerCAmelCase ) , __lowerCAmelCase ))
| 339 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
    # Slow integration test: the Flax MT5 loss on a fixed example must match a
    # pinned reference score (requires network access to download the model).
    # NOTE(review): local bindings are mangled — assignments rebind
    # `__lowerCamelCase` while later code reads `model`, `tokenizer`,
    # `input_ids`, `labels`, `logits`, `loss`. Code left byte-identical.
    @slow
    def __A ( self : Optional[int] ) -> Union[str, Any]:
        __lowerCamelCase = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' )
        __lowerCamelCase = AutoTokenizer.from_pretrained('''google/mt5-small''' )
        __lowerCamelCase = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
        __lowerCamelCase = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids
        __lowerCamelCase = shift_tokens_right(SCREAMING_SNAKE_CASE__ , model.config.pad_token_id , model.config.decoder_start_token_id )
        __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , decoder_input_ids=SCREAMING_SNAKE_CASE__ ).logits
        __lowerCamelCase = optax.softmax_cross_entropy(SCREAMING_SNAKE_CASE__ , onehot(SCREAMING_SNAKE_CASE__ , logits.shape[-1] ) ).mean()
        __lowerCamelCase = -(labels.shape[-1] * loss.item())
        __lowerCamelCase = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 339 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.