import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs('hub/hopper-medium-v2/unet/hor32', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/unet/hor128', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/value_function', exist_ok=True)
def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    # Rename every original diffuser key to the matching diffusers key.
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)
def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }

    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model  # the value-function checkpoint is already a plain state dict
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
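# Usage sketch (an assumption for illustration, not part of the original script): once the
# conversion has run, each dumped weights/config pair should load back with
#   UNet1DModel.from_pretrained("hub/hopper-medium-v2/unet/hor32")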
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        return self.variance.sqrt()


class ParameterProjection(nn.Module):
    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ):
        super().__init__(**kwargs)
        self.args_dim = args_dim
        # One linear head per distribution parameter (e.g. df, loc, scale).
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)


class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1):
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self,
        distr_args,
        loc: Optional[torch.Tensor] = None,
        scale: Optional[torch.Tensor] = None,
    ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        return ParameterProjection(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        # Smooth positivity mapping: (x + sqrt(x^2 + 4)) / 2, an alternative to softplus.
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0


class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
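# Minimal usage sketch (an illustration, not part of the original module): project a batch of
# hidden states of size 32 to Student-T parameters and build the distribution.
#
#   distr_output = StudentTOutput(dim=1)
#   projection = distr_output.get_parameter_projection(in_features=32)
#   df, loc, scale = projection(torch.randn(8, 32))
#   distribution = distr_output.distribution((df, loc, scale))  # batch of 8 Student-T laws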
import argparse
import os
import re
import packaging.version
UpperCamelCase = """examples/"""
UpperCamelCase = {
"""examples""": (re.compile(r"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(r"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(r"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), r"""\1version=\"VERSION\","""),
"""doc""": (re.compile(r"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
UpperCamelCase = {
"""init""": """src/diffusers/__init__.py""",
"""setup""": """setup.py""",
}
UpperCamelCase = """README.md"""
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
with open(SCREAMING_SNAKE_CASE , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
A_ : Optional[Any] = f.read()
A_ , A_ : Optional[Any] = REPLACE_PATTERNS[pattern]
A_ : Tuple = replace.replace('''VERSION''' , SCREAMING_SNAKE_CASE )
A_ : Optional[int] = re_pattern.sub(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
for folder, directories, fnames in os.walk(SCREAMING_SNAKE_CASE ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , pattern='''examples''' )
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ):
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if not patch:
update_version_in_examples(SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( ):
A_ : Union[str, Any] = '''🤗 Transformers currently provides the following architectures'''
A_ : Optional[int] = '''1. Want to contribute a new model?'''
with open(SCREAMING_SNAKE_CASE , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
A_ : Union[str, Any] = f.readlines()
# Find the start of the list.
A_ : Union[str, Any] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
A_ : List[str] = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
A_ : Any = lines[index].replace(
'''https://huggingface.co/docs/diffusers/main/model_doc''' , '''https://huggingface.co/docs/diffusers/model_doc''' , )
index += 1
with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( ):
with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
A_ : int = f.read()
A_ : Tuple = REPLACE_PATTERNS['''init'''][0].search(SCREAMING_SNAKE_CASE ).groups()[0]
return packaging.version.parse(SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE=False ):
A_ : Tuple = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
A_ : Optional[Any] = default_version.base_version
elif patch:
A_ : Dict = f'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
else:
A_ : int = f'''{default_version.major}.{default_version.minor + 1}.0'''
# Now let's ask nicely if that's the right one.
A_ : List[str] = input(f'''Which version are you releasing? [{default_version}]''' )
if len(SCREAMING_SNAKE_CASE ) == 0:
A_ : Dict = default_version
print(f'''Updating version to {version}.''' )
global_version_update(SCREAMING_SNAKE_CASE , patch=SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( ):
A_ : List[Any] = get_version()
A_ : Optional[Any] = f'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
A_ : Dict = current_version.base_version
# Check with the user we got that right.
A_ : List[Any] = input(f'''Which version are we developing now? [{dev_version}]''' )
if len(SCREAMING_SNAKE_CASE ) == 0:
A_ : Optional[int] = dev_version
print(f'''Updating version to {version}.''' )
global_version_update(SCREAMING_SNAKE_CASE )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
UpperCamelCase = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work()
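# Typical invocations, inferred from the argparse flags above:
#   python release.py                  # bump the minor version before a release
#   python release.py --patch          # bump the micro version for a patch release
#   python release.py --post_release   # move back to a .dev0 version afterwards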
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class EncodecFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "padding_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 24_000,
        padding_value: float = 0.0,
        chunk_length_s: float = None,
        overlap: float = None,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    def __call__(
        self,
        raw_audio,
        padding=None,
        truncation=False,
        max_length=None,
        return_tensors=None,
        sampling_rate=None,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one.")
        elif padding is None:
            # by default let's pad the inputs
            padding = True

        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]

        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels")

        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio})
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
            else:
                padded_inputs = input_values

        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values,
                max_length=max_length,
                truncation=truncation,
                padding=padding,
                return_attention_mask=padding,
            )
            if padding:
                padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")

        input_values = []
        for example in padded_inputs.pop("input_values"):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T)

        padded_inputs["input_values"] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
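# Usage sketch (an illustration, not part of the original file): one second of silent mono
# audio at the default 24 kHz rate.
#
#   fe = EncodecFeatureExtractor(feature_size=1, sampling_rate=24_000)
#   inputs = fe(np.zeros(24_000, dtype=np.float32), sampling_rate=24_000, return_tensors="pt")
#   inputs["input_values"]  # shaped (batch, channels, time)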
from torch import nn


def get_activation(act_fn: str) -> nn.Module:
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
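# For example, get_activation("gelu") returns an nn.GELU() module that can be dropped
# straight into an nn.Sequential.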
def jaro_winkler(str1: str, str2: str) -> float:
    """
    Jaro-Winkler similarity between two strings.

    >>> jaro_winkler("hello", "world")
    0.4666666666666666
    """

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                # Blank out the matched character so it cannot match twice.
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"

        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler("hello", "world"))
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
    "good first issue",
    "feature request",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state="closed")
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
"""simple docstring"""
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class _UpperCAmelCase ( unittest.TestCase):
def lowerCamelCase__ ( self ):
_snake_case : List[Any] = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
_snake_case : List[Any] = Vector()
def lowerCamelCase__ ( self ):
_snake_case : Any = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(snake_case_ ) , "(0,0,0,0,0,1)" )
def lowerCamelCase__ ( self ):
_snake_case : Dict = Vector([1, 2, 3, 4] )
self.assertEqual(len(snake_case_ ) , 4 )
def lowerCamelCase__ ( self ):
_snake_case : List[Any] = Vector([1, 2] )
_snake_case : List[str] = Vector([1, 2, 3, 4, 5] )
_snake_case : List[Any] = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
_snake_case : Any = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 )
def lowerCamelCase__ ( self ):
_snake_case : List[Any] = Vector([1, 2, 3] )
_snake_case : Any = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def lowerCamelCase__ ( self ):
_snake_case : str = Vector([1, 2, 3] )
_snake_case : Union[str, Any] = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def lowerCamelCase__ ( self ):
_snake_case : Optional[int] = Vector([1, 2, 3] )
_snake_case : List[Any] = Vector([2, -1, 4] ) # for test of dot product
_snake_case : Union[str, Any] = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , "(3.0,6.0,9.0)" )
self.assertEqual((a * b) , 0 )
def lowerCamelCase__ ( self ):
self.assertEqual(str(zero_vector(10 ) ).count("0" ) , 10 )
def lowerCamelCase__ ( self ):
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , "(0,1,0)" )
def lowerCamelCase__ ( self ):
_snake_case : Tuple = Vector([1, 2, 3] )
_snake_case : Optional[Any] = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , snake_case_ , snake_case_ ) ) , "(3,4,7)" )
def lowerCamelCase__ ( self ):
_snake_case : Union[str, Any] = Vector([1, 0, 0, 0, 0, 0] )
_snake_case : Optional[int] = x.copy()
self.assertEqual(str(snake_case_ ) , str(snake_case_ ) )
def lowerCamelCase__ ( self ):
_snake_case : Dict = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(snake_case_ ) , "(0,1,0)" )
def lowerCamelCase__ ( self ):
_snake_case : str = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n" , str(snake_case_ ) )
def lowerCamelCase__ ( self ):
_snake_case : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_snake_case : str = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(snake_case_ , snake_case_ ) )
def lowerCamelCase__ ( self ):
_snake_case : Union[str, Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_snake_case : Optional[Any] = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(snake_case_ , snake_case_ ) )
def lowerCamelCase__ ( self ):
_snake_case : int = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def lowerCamelCase__ ( self ):
_snake_case : str = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
_snake_case : List[str] = Vector([1, 2, 3] )
self.assertEqual("(14,32,50)" , str(a * x ) )
self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n" , str(a * 2 ) )
def lowerCamelCase__ ( self ):
_snake_case : Optional[int] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n" , str(snake_case_ ) )
def lowerCamelCase__ ( self ):
_snake_case : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(7 , a.component(2 , 1 ) , 0.01 )
def lowerCamelCase__ ( self ):
_snake_case : Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_snake_case : int = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n" , str(a + b ) )
def lowerCamelCase__ ( self ):
_snake_case : int = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_snake_case : Optional[Any] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n" , str(a - b ) )
def lowerCamelCase__ ( self ):
self.assertEqual(
"|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n" , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main()
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}


class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
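# Usage sketch (assuming the matching VanModel class is importable from transformers):
#   from transformers import VanModel
#   model = VanModel(VanConfig())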
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]

if TYPE_CHECKING:
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1_000_000) -> int:
    """
    Returns the numerator of the fraction immediately to the left of
    numerator/denominator among reduced proper fractions with denominators
    up to `limit` (Project Euler problem 71).
    """
    max_numerator = 0
    max_denominator = 1

    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1

        # Compare fractions without floating point: a/b > c/d iff a*d > b*c.
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator

    return max_numerator


if __name__ == "__main__":
    print(solution(numerator=3, denominator=7, limit=1_000_000))
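# With the defaults above (3/7, limit 1_000_000) this prints 428570, the accepted
# answer to Project Euler problem 71.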
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
    "configuration_audio_spectrogram_transformer": [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ASTConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ASTForAudioClassification",
        "ASTModel",
        "ASTPreTrainedModel",
    ]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]

if TYPE_CHECKING:
    from .configuration_audio_spectrogram_transformer import (
        AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ASTConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_audio_spectrogram_transformer import (
            AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ASTForAudioClassification,
            ASTModel,
            ASTPreTrainedModel,
        )

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []

    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)


if __name__ == "__main__":
    packer_cli()
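# Example invocation (the script name and paths are placeholders):
#   python pack_dataset.py --tok_name facebook/bart-large-cnn --max_seq_len 1024 \
#       --data_dir ./cnn_dm --save_path ./cnn_dm_packed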
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Post renaming of basic JAX keys to pytorch."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

    return flax_key_tuple, flax_tensor


def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer[-1] = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content


def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)


def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts) + 1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts) + 1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx + 1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx + 1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
    parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )


def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )

    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."

    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
def solution(n: int = 600851475143) -> int:
    """Returns the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    max_number = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            max_number = i
            n //= i
        i += 1
    if n > 1:
        max_number = n
    return int(max_number)


if __name__ == "__main__":
    print(f"{solution() = }")
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
def sanity_checks(args):
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")
    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file",
        type=str,
        required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )
    parser.add_argument(
        "--student_type",
        type=str,
        choices=["distilbert", "roberta", "gpt2"],
        required=True,
        help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )
    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")
    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm",
        default=0.0,
        type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )
    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop",
        default=0.15,
        type=float,
        help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing",
        default=0.7,
        type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")
    parser.add_argument(
        "--restrict_ce_to_mask",
        action="store_true",
        help="If true, compute the distillation loss only the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs",
        action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds",
        action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )
    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size",
        action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=50,
        help="Gradient accumulation for larger training batches.",
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")
    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it."
                )
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")
if __name__ == "__main__":
main()
| 462 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
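
# Minimal usage sketch (illustrative; assumes the matching Cvt model classes
# from `transformers` are available in this environment):
#   from transformers import CvtModel
#   config = CvtConfig()
#   model = CvtModel(config)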
| 286 | 0 |
"""simple docstring"""
def binary_recursive(decimal: int) -> str:
    """Recursively convert a non-negative integer into its binary digits."""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def decimal_to_binary(number: str) -> str:
    """Validate the input and return a '0b'-prefixed binary string."""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"
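
# Illustrative check: decimal_to_binary("-10") returns "-0b1010".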
if __name__ == "__main__":
from doctest import testmod
testmod()
| 721 |
"""simple docstring"""
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Deterministic primality check using 6k +/- 1 trial division."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]
def compute_nums(n: int) -> list[int]:
    """Return the first n odd composites that cannot be written as prime + 2*i*i."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums
    return []


def solution() -> int:
    """Project Euler 46: smallest odd composite refuting Goldbach's other conjecture."""
    return compute_nums(1)[0]
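
# Known result for Project Euler 46: solution() == 5777.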
if __name__ == "__main__":
print(F"{solution() = }")
| 14 | 0 |
import heapq
def greedy_min_vertex_cover(graph: dict) -> set[int]:
    """Greedy APX algorithm: repeatedly pick the vertex covering the most uncovered edges."""
    queue: list[list] = []

    # For each node and its adjacency list, add them and the rank of the node to the queue.
    # Using the heapq module the queue is filled like a priority queue.
    # heapq implements a min-priority queue, so -1 * len(v) turns it into a max-priority queue.
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # While the queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # Extract the vertex with max rank from the queue and add it to chosen_vertices.
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax.
        for elem in queue:
            # If v has no adjacent node, skip it.
            if elem[0] == 0:
                continue
            # If argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank.
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # Re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
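
# For the demo graph defined below, this greedy strategy returns {0, 1, 2, 4}
# (an approximation; greedy does not guarantee a minimum cover).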
if __name__ == "__main__":
import doctest
doctest.testmod()
graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
| 253 |
def present_value(discount_rate: float, cash_flows: list[float]) -> float:
    """Net present value of a cash-flow series, rounded to 2 decimal places."""
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)
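
# Worked example: present_value(0.1, [-100, 50, 60]) == -4.96
# (-100 / 1.1**0 + 50 / 1.1**1 + 60 / 1.1**2 ≈ -100 + 45.45 + 49.59).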
if __name__ == "__main__":
import doctest
doctest.testmod()
| 253 | 1 |
def kth_permutation(k: int, n: int) -> list[int]:
    """Return the k-th (0-indexed) lexicographic permutation of [0, 1, ..., n-1]."""
    # Factorials from 1! to (n-1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])

    permutation.append(elements[0])
    return permutation
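
# Illustrative check: kth_permutation(1, 3) == [0, 2, 1]
# (permutations of [0, 1, 2] in lexicographic order: [0,1,2], [0,2,1], [1,0,2], ...).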
if __name__ == "__main__":
import doctest
doctest.testmod()
| 253 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score `item` by counting position-wise character matches with `main_target`."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
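
# Illustrative check: evaluate("Helxo Worlx", "Hello World") == ("Helxo Worlx", 9.0)
# (nine of the eleven characters match position-wise).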
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice both parents at a random point and swap the tails."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of `child` with probability MUTATION_PROBABILITY."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Select a second parent, cross over and mutate to produce new children."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Evolve random strings until one matches `target`; return (generation, total_population, best)."""
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations
        # just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings
            # in far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
| 253 | 1 |
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def set_seed(seed: int):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available
class EMAModel:
    """
    Exponential Moving Average of model weights.
    """

    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.9999,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Dict[str, Any] = None,
        **kwargs,
    ):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True

        if kwargs.get("max_value", None) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False)
            decay = kwargs["max_value"]

        if kwargs.get("min_value", None) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False)
            min_decay = kwargs["min_value"]

        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]

        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])

        self.temp_stored_params = None

        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`

        self.model_cls = model_cls
        self.model_config = model_config
    @classmethod
    def from_pretrained(cls, path, model_cls) -> "EMAModel":
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)

        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)

        ema_model.load_state_dict(ema_kwargs)
        return ema_model
    def save_pretrained(self, path):
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.")

        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.")

        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        state_dict.pop("shadow_params", None)

        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)
    def get_decay(self, optimization_step: int) -> float:
        """Compute the decay factor for the exponential moving average."""
        step = max(0, optimization_step - self.update_after_step - 1)

        if step <= 0:
            return 0.0

        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)

        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value
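
    # Illustrative values for the warmup schedule above (inv_gamma=1.0, power=2/3):
    # step 10 -> 1 - 11 ** (-2/3) ≈ 0.80, step 1000 -> ≈ 0.99, approaching `decay` from below.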
    @torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

        parameters = list(parameters)

        self.optimization_step += 1

        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay

        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed

        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)

            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)
    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Copy the averaged (shadow) parameters into the given parameters."""
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)
    def to(self, device=None, dtype=None) -> None:
        # non-floating-point tensors cannot change dtype, only device
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]
    def state_dict(self) -> dict:
        return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Temporarily store the current parameters so they can be restored later."""
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]
    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights to `restore()`")
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)

        # Better memory-wise.
        self.temp_stored_params = None
    def load_state_dict(self, state_dict: dict) -> None:
        # deepcopy, to be consistent with module API
        state_dict = copy.deepcopy(state_dict)

        self.decay = state_dict.get("decay", self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1")

        self.min_decay = state_dict.get("min_decay", self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError("Invalid min_decay")

        self.optimization_step = state_dict.get("optimization_step", self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError("Invalid optimization_step")

        self.update_after_step = state_dict.get("update_after_step", self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError("Invalid update_after_step")

        self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError("Invalid use_ema_warmup")

        self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError("Invalid inv_gamma")

        self.power = state_dict.get("power", self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError("Invalid power")

        shadow_params = state_dict.get("shadow_params", None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError("shadow_params must be a list")
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError("shadow_params must all be Tensors")
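
# Typical training-loop usage (illustrative sketch, not part of this module):
#   ema = EMAModel(model.parameters(), decay=0.9999)
#   for batch in dataloader:
#       ...; optimizer.step()
#       ema.step(model.parameters())
#   ema.copy_to(model.parameters())  # load the averaged weights before eval/saving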
| 17 |
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    """
    Handles arguments for zero-shot text classification by turning each possible label
    into an NLI premise/hypothesis pair.
    """

    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels
    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template)
            )

        if isinstance(sequences, str):
            sequences = [sequences]

        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])

        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    """
    NLI-based zero-shot classification pipeline using a model trained on NLI (natural
    language inference) tasks; each candidate label is turned into an NLI hypothesis.
    """
    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs."
            )
    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1
    def _parse_and_tokenize(
        self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
    ):
        """Parse arguments and tokenize, truncating only_first so the hypothesis (label) is never cut off."""
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`"
            )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs,
                add_special_tokens=add_special_tokens,
                return_tensors=return_tensors,
                padding=padding,
                truncation=truncation,
            )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs,
                    add_special_tokens=add_special_tokens,
                    return_tensors=return_tensors,
                    padding=padding,
                    truncation=TruncationStrategy.DO_NOT_TRUNCATE,
                )
            else:
                raise e

        return inputs
    def _sanitize_parameters(self, **kwargs):
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers."
            )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params
    def __call__(self, sequences: Union[str, List[str]], *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"Unable to understand extra arguments {args}")

        return super().__call__(sequences, **kwargs)
    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)

        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])

            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }
    def _forward(self, inputs):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)

        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs
    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))

        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)

        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
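
# Typical usage (illustrative; assumes an NLI checkpoint such as facebook/bart-large-mnli):
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
#   classifier("I have a problem with my iphone that needs to be resolved asap!",
#              candidate_labels=["urgent", "not urgent", "phone", "computer"])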
| 165 | 0 |
def greatest_common_divisor(x: int, y: int) -> int:
    """Euclidean algorithm."""
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    """Least common multiple via the gcd."""
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    """Project Euler 5: smallest positive number evenly divisible by all of 1..n."""
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g
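
# Known results: solution(10) == 2520 and solution(20) == 232792560 (Project Euler 5).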
if __name__ == "__main__":
print(f'''{solution() = }''')
| 717 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class WhisperFeatureExtractor(SequenceFeatureExtractor):
    """Constructs a Whisper feature extractor that computes log-mel spectrograms from raw speech."""

    model_input_names = ["input_features"]
    def __init__(self, feature_size=80, sampling_rate=16_000, hop_length=160, chunk_length=30, n_fft=400, padding_value=0.0, return_attention_mask=False, **kwargs):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, return_attention_mask=return_attention_mask, **kwargs
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=8_000.0, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney"
        )
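
    # With the defaults above: n_samples = 30 * 16_000 = 480_000 samples per chunk and
    # nb_max_frames = 480_000 // 160 = 3_000 mel frames -- the fixed 30-second Whisper window.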
    def _np_extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        """Compute the log-mel spectrogram of the audio, clipped and scaled to roughly [-1, 1]."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        """Every array in the list is normalized to have zero mean and unit variance."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: bool = True,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        padding: Optional[str] = "max_length",
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        do_normalize: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
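
    # Typical usage (illustrative; assumes a 16 kHz mono waveform `audio`):
    #   extractor = WhisperFeatureExtractor()
    #   features = extractor(audio, sampling_rate=16_000, return_tensors="np")
    #   features["input_features"].shape  # (1, 80, 3000)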
    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict, dropping the (large, reconstructible) mel filter bank."""
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
| 643 | 0 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length"
                )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length"
                )
    # The Reformer tokenizer has no padding token, so this common test is skipped.
    def test_padding_different_model_input_name(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    @cached_property
    def big_tokenizer(self):
        return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import ReformerConfig, ReformerModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt")
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt")

        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence["input_ids"].shape
        model = ReformerModel(config)

        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
            "This is a very simple sentence.",
            "The quick brown fox jumps over the lazy dog.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/reformer-crime-and-punishment",
            revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a",
            padding=False,
            sequences=sequences,
        )
| 94 |
'''simple docstring'''
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training
    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1_000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))

        config = self.get_config()

        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )
    def get_config(self):
        return DecisionTransformerConfig(
            batch_size=self.batch_size, seq_length=self.seq_length, act_dim=self.act_dim, state_dim=self.state_dim, hidden_size=self.hidden_size, max_length=self.max_length
        )
    def create_and_check_model(self, config, states, actions, rewards, returns_to_go, timesteps, attention_mask):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)

        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )  # seq length * 3 as there are 3 modalities: states, returns and actions
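
    # Why seq_length * 3: the transformer consumes the three modalities interleaved per
    # timestep as (return_1, state_1, action_1, return_2, state_2, action_2, ...).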
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use input_ids
    test_generate_without_input_ids = False

    # Ignoring of failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False

    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_autoregressive_prediction(self):
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()

        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device
        )

        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)

        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)

            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)

            with torch.no_grad():
                state_preds, action_pred, return_preds = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )

            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))

            state, reward, _, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )

            action = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )
| 11 | 0 |
"""simple docstring"""
import unittest
import numpy as np
def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv=None,
) -> np.ndarray:
    """Schur complement of the block matrix [[A, B], [B^T, C]] with respect to A: S = C - B^T A^{-1} B."""
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement."
            )

    return mat_c - mat_b.T @ a_inv @ mat_b
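
# Determinant identity exercised by the test below (for invertible A):
#   det([[A, B], [B^T, C]]) = det(A) * det(C - B^T A^{-1} B)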
class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        # A deliberately has 2 rows while B has 3, so the row check must fire.
        a = np.array([[1, 2, 1], [2, 1, 2]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)

    def test_improper_b_c_dimensions(self) -> None:
        # B has 2 columns while C has 3, so the column check must fire.
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 713 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"Visual-Attention-Network/van-base": (
"https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
),
}
class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
| 690 | 0 |
'''simple docstring'''
def actual_power(a: int, b: int) -> int:
    """Divide-and-conquer exponentiation; int(b / 2) truncates toward zero, so it also terminates for negative b."""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    """Handle negative exponents via the reciprocal."""
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)
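
# Illustrative checks: power(2, 3) == 8 and power(-2, -3) == -0.125.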
if __name__ == "__main__":
print(power(-2, -3))
| 5 |
'''simple docstring'''
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """Count how often each total occurs when rolling `dice_number` dice with `sides_number` sides."""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    """
    Project Euler 205: probability that Peter (nine 4-sided dice) rolls a strictly
    higher total than Colin (six 6-sided dice), rounded to 7 decimal places.
    """
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number

    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability
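
# Known result for Project Euler 205: solution() == 0.5731441.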
if __name__ == "__main__":
print(F"""{solution() = }""")
| 5 | 1 |
import math
import sys

import cv2
import numpy as np


def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    # For applying gaussian function for each element in matrix.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)


def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    # Creates a gaussian kernel of given dimension.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)


def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            img2[i, j] = val
    return img2
def lowercase_ (A : list ):
snake_case__ : Dict = args[1] if args[1:] else '../image_data/lena.jpg'
snake_case__ : Optional[Any] = float(args[2] ) if args[2:] else 1.0
snake_case__ : List[Any] = float(args[3] ) if args[3:] else 1.0
if args[4:]:
snake_case__ : List[str] = int(args[4] )
snake_case__ : Tuple = kernel_size + abs(kernel_size % 2 - 1 )
else:
snake_case__ : List[Any] = 5
return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
a_ :str = parse_args(sys.argv)
a_ :str = cva.imread(filename, 0)
cva.imshow('input image', img)
a_ :List[str] = img / 255
a_ :Optional[Any] = out.astype('float32')
a_ :List[str] = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
a_ :List[str] = out * 255
a_ :Union[str, Any] = np.uinta(out)
cva.imshow('output image', out)
cva.waitKey(0)
cva.destroyAllWindows()
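# Illustrative invocation (script name and paths are placeholders):
#   python bilateral_filter.py ../image_data/lena.jpg 1.0 1.0 5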
| 721 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
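# Fast CPU test for the Kandinsky 2.1 inpainting pipeline: a tiny dummy text
# encoder, UNet, and movq (VQ) decoder are wired together so the full
# prompt -> masked image -> denoised output path runs in seconds.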
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
_SCREAMING_SNAKE_CASE = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
    batch_params = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
    required_optional_params = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
    test_xformers_attention = False
@property
def lowercase_ ( self : Optional[Any] ) ->Optional[Any]:
return 3_2
@property
def lowercase_ ( self : int ) ->str:
return 3_2
@property
def lowercase_ ( self : Any ) ->List[str]:
return self.time_input_dim
@property
def lowercase_ ( self : Optional[Any] ) ->str:
return self.time_input_dim * 4
@property
def lowercase_ ( self : Tuple ) ->int:
return 1_0_0
@property
def lowercase_ ( self : str ) ->Dict:
snake_case__ : Union[str, Any] = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
return tokenizer
@property
def lowercase_ ( self : Any ) ->Optional[int]:
torch.manual_seed(0 )
snake_case__ : str = MCLIPConfig(
numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=3_7, num_attention_heads=4, num_hidden_layers=5, vocab_size=1_0_0_5, )
snake_case__ : Optional[Any] = MultilingualCLIP(_snake_case )
snake_case__ : List[Any] = text_encoder.eval()
return text_encoder
@property
def lowercase_ ( self : Tuple ) ->Optional[int]:
torch.manual_seed(0 )
snake_case__ : Optional[Any] = {
'in_channels': 9,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
snake_case__ : Dict = UNetaDConditionModel(**_snake_case )
return model
@property
def lowercase_ ( self : Dict ) ->Optional[int]:
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowercase_ ( self : Union[str, Any] ) ->List[Any]:
torch.manual_seed(0 )
snake_case__ : Optional[int] = VQModel(**self.dummy_movq_kwargs )
return model
def lowercase_ ( self : Any ) ->Any:
snake_case__ : int = self.dummy_text_encoder
snake_case__ : str = self.dummy_tokenizer
snake_case__ : Any = self.dummy_unet
snake_case__ : Tuple = self.dummy_movq
snake_case__ : int = DDIMScheduler(
num_train_timesteps=1_0_0_0, beta_schedule='linear', beta_start=0.0_0_0_8_5, beta_end=0.0_1_2, clip_sample=_snake_case, set_alpha_to_one=_snake_case, steps_offset=1, prediction_type='epsilon', thresholding=_snake_case, )
snake_case__ : Optional[int] = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def lowercase_ ( self : str, _snake_case : Any, _snake_case : int=0 ) ->str:
snake_case__ : Union[str, Any] = floats_tensor((1, self.cross_attention_dim), rng=random.Random(_snake_case ) ).to(_snake_case )
snake_case__ : str = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1 ) ).to(_snake_case )
# create init_image
snake_case__ : Tuple = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(_snake_case ) ).to(_snake_case )
snake_case__ : Optional[Any] = image.cpu().permute(0, 2, 3, 1 )[0]
snake_case__ : Tuple = Image.fromarray(np.uinta(_snake_case ) ).convert('RGB' ).resize((2_5_6, 2_5_6) )
# create mask
snake_case__ : Any = np.ones((6_4, 6_4), dtype=np.floataa )
snake_case__ : Optional[Any] = 0
if str(_snake_case ).startswith('mps' ):
snake_case__ : Union[str, Any] = torch.manual_seed(_snake_case )
else:
snake_case__ : Any = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
snake_case__ : int = {
'prompt': 'horse',
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 6_4,
'width': 6_4,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def lowercase_ ( self : Optional[int] ) ->Optional[Any]:
snake_case__ : int = 'cpu'
snake_case__ : str = self.get_dummy_components()
snake_case__ : Any = self.pipeline_class(**_snake_case )
snake_case__ : Optional[Any] = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
snake_case__ : Tuple = pipe(**self.get_dummy_inputs(_snake_case ) )
snake_case__ : List[Any] = output.images
snake_case__ : List[Any] = pipe(
**self.get_dummy_inputs(_snake_case ), return_dict=_snake_case, )[0]
snake_case__ : Optional[int] = image[0, -3:, -3:, -1]
snake_case__ : int = image_from_tuple[0, -3:, -3:, -1]
print(F'''image.shape {image.shape}''' )
assert image.shape == (1, 6_4, 6_4, 3)
snake_case__ : Any = np.array(
[0.8_3_2_6_9_1_9, 0.7_3_7_9_0_4_6_7, 0.2_0_9_1_8_5_8_1, 0.9_3_0_9_6_1_2, 0.5_5_1_1_7_9_1, 0.4_3_7_1_3_3_2_8, 0.5_5_1_3_3_2_1, 0.4_9_9_2_2_9_3_4, 0.5_9_4_9_7_7_8_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def lowercase_ ( self : Any ) ->List[Any]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
def lowercase_ ( self : Dict ) ->int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self : Optional[int] ) ->List[str]:
snake_case__ : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy' )
snake_case__ : Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
snake_case__ : Union[str, Any] = np.ones((7_6_8, 7_6_8), dtype=np.floataa )
snake_case__ : str = 0
snake_case__ : List[str] = 'a hat'
snake_case__ : Any = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior', torch_dtype=torch.floataa )
pipe_prior.to(_snake_case )
snake_case__ : Union[str, Any] = KandinskyInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-inpaint', torch_dtype=torch.floataa )
snake_case__ : Tuple = pipeline.to(_snake_case )
pipeline.set_progress_bar_config(disable=_snake_case )
snake_case__ : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 )
snake_case__ , snake_case__ : Tuple = pipe_prior(
_snake_case, generator=_snake_case, num_inference_steps=5, negative_prompt='', ).to_tuple()
snake_case__ : Optional[Any] = pipeline(
_snake_case, image=_snake_case, mask_image=_snake_case, image_embeds=_snake_case, negative_image_embeds=_snake_case, generator=_snake_case, num_inference_steps=1_0_0, height=7_6_8, width=7_6_8, output_type='np', )
snake_case__ : Dict = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(_snake_case, _snake_case )
| 243 | 0 |
import numpy as np


def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    """ELU activation: x for x > 0, alpha * (exp(x) - 1) otherwise."""
    return np.where(vector > 0, vector, (alpha * (np.exp(vector) - 1)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
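    # Illustrative values (assuming alpha = 0.3):
    # exponential_linear_unit(np.array([2.3, 0.6, -2.0, -3.8]), 0.3)
    # -> [2.3, 0.6, -0.25939942, -0.29328877]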
| 33 |
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    """Normality = molarity (moles / volume) times the n-factor."""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for pressure: P = nRT / V, with R = 0.0821 L*atm/(mol*K)."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for volume: V = nRT / P."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(
    pressure: float, moles: float, volume: float
) -> float:
    """Ideal gas law solved for temperature: T = PV / nR."""
    return round(float((pressure * volume) / (0.0821 * moles)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
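    # Worked examples (computed by hand from the formulas above):
    # molarity_to_normality(2, 3.1, 0.31)            -> 20
    # moles_to_pressure(0.82, 3, 300)                -> 90
    # moles_to_volume(0.82, 3, 300)                  -> 90
    # pressure_and_volume_to_temperature(0.82, 1, 2) -> 20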
| 268 | 0 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
a_ : List[str] = logging.get_logger(__name__)
a_ : Union[str, Any] = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
a_ : Union[str, Any] = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
for attribute in key.split("." ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
lowerCamelCase_ = "lm_head"
lowerCamelCase_ = getattr(UpperCAmelCase_ , UpperCAmelCase_ )
if weight_type is not None:
lowerCamelCase_ = getattr(UpperCAmelCase_ , UpperCAmelCase_ ).shape
else:
lowerCamelCase_ = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
lowerCamelCase_ = value
elif weight_type == "weight_g":
lowerCamelCase_ = value
elif weight_type == "weight_v":
lowerCamelCase_ = value
elif weight_type == "bias":
lowerCamelCase_ = value
else:
lowerCamelCase_ = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
lowerCamelCase_ = []
lowerCamelCase_ = fairseq_model.state_dict()
lowerCamelCase_ = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
lowerCamelCase_ = False
if "conv_layers" in name:
load_conv_layer(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , hf_model.config.feat_extract_norm == "group" , )
lowerCamelCase_ = True
else:
for key, mapped_key in MAPPING.items():
lowerCamelCase_ = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
lowerCamelCase_ = True
if "*" in mapped_key:
lowerCamelCase_ = name.split(UpperCAmelCase_ )[0].split("." )[-2]
lowerCamelCase_ = mapped_key.replace("*" , UpperCAmelCase_ )
if "weight_g" in name:
lowerCamelCase_ = "weight_g"
elif "weight_v" in name:
lowerCamelCase_ = "weight_v"
elif "bias" in name:
lowerCamelCase_ = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowerCamelCase_ = "weight"
else:
lowerCamelCase_ = None
set_recursively(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
continue
if not is_used:
unused_weights.append(UpperCAmelCase_ )
logger.warning(F'''Unused weights: {unused_weights}''' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
lowerCamelCase_ = full_name.split("conv_layers." )[-1]
lowerCamelCase_ = name.split("." )
lowerCamelCase_ = int(items[0] )
lowerCamelCase_ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
lowerCamelCase_ = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
lowerCamelCase_ = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
lowerCamelCase_ = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
lowerCamelCase_ = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(UpperCAmelCase_ )
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
if config_path is not None:
lowerCamelCase_ = UniSpeechConfig.from_pretrained(UpperCAmelCase_ )
else:
lowerCamelCase_ = UniSpeechConfig()
if is_finetuned:
if dict_path:
lowerCamelCase_ = Dictionary.load_from_json(UpperCAmelCase_ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
lowerCamelCase_ = target_dict.pad_index
lowerCamelCase_ = target_dict.bos_index
lowerCamelCase_ = target_dict.eos_index
lowerCamelCase_ = len(target_dict.symbols )
lowerCamelCase_ = os.path.join(UpperCAmelCase_ , "vocab.json" )
if not os.path.isdir(UpperCAmelCase_ ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(UpperCAmelCase_ ) )
return
os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
lowerCamelCase_ = target_dict.indices
# fairseq has the <pad> and <s> switched
lowerCamelCase_ = 42
lowerCamelCase_ = 43
with open(UpperCAmelCase_ , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCamelCase_ = WavaVecaPhonemeCTCTokenizer(
UpperCAmelCase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=UpperCAmelCase_ , )
lowerCamelCase_ = True if config.feat_extract_norm == "layer" else False
lowerCamelCase_ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , )
lowerCamelCase_ = WavaVecaProcessor(feature_extractor=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ )
processor.save_pretrained(UpperCAmelCase_ )
lowerCamelCase_ = UniSpeechForCTC(UpperCAmelCase_ )
else:
lowerCamelCase_ = UniSpeechForPreTraining(UpperCAmelCase_ )
if is_finetuned:
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] ), "w2v_path": checkpoint_path} )
else:
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
lowerCamelCase_ = model[0].eval()
recursively_load_weights(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
hf_unispeech.save_pretrained(UpperCAmelCase_ )
if __name__ == "__main__":
a_ : List[Any] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
a_ : int = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
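# Illustrative invocation (script name and paths are placeholders):
#   python convert_unispeech_checkpoint.py \
#       --checkpoint_path ./unispeech_large.pt \
#       --pytorch_dump_folder_path ./unispeech-hf \
#       --dict_path ./dict.ltr.txt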
| 445 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/unispeech-large-1500h-cv""": (
"""https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"""
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    r"""Configuration class to store the configuration of a [`UniSpeechModel`]."""

    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1,
        activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1,
        initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05,
        mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0,
        mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320,
        num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100,
        codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1,
        ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False,
        classifier_proj_size=256, num_ctc_classes=80, pad_token_id=0, bos_token_id=1,
        eos_token_id=2, replace_prob=0.5, **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob
    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
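# With the default conv strides (5, 2, 2, 2, 2, 2, 2) the feature extractor
# downsamples the waveform by 5 * 2**6 = 320, i.e.
# UniSpeechConfig().inputs_to_logits_ratio == 320.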
| 445 | 1 |
"""Changing contrast with PIL."""
from PIL import Image


def change_contrast(img: Image, level: int) -> Image:
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))

    return img.point(contrast)
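# Worked example: for level=170 the factor is 259 * 425 / (255 * 89), roughly 4.85,
# so mid-gray (128) is left unchanged while values away from it are stretched by
# about that factor.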
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change contrast to 170
UpperCAmelCase = change_contrast(img, 170)
cont_img.save('image_data/lena_high_contrast.png', format='png') | 433 |
'''simple docstring'''
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
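# Quick numeric check: softmax applied to [1.0, 2.0] gives ~[0.269, 0.731];
# subtracting the row max first keeps np.exp from overflowing on large logits.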
class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
__lowerCamelCase , r"\n return_all_scores (`bool`, *optional*, defaults to `False`):\n Whether to return all prediction scores or just the one of the predicted class.\n function_to_apply (`str`, *optional*, defaults to `\"default\"`):\n The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n - `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n has several labels, will apply the softmax function on the output.\n - `\"sigmoid\"`: Applies the sigmoid function on the output.\n - `\"softmax\"`: Applies the softmax function on the output.\n - `\"none\"`: Does not apply any function on the output.\n " , )
class TextClassificationPipeline(Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE
def __init__( self : Union[str, Any] , **__A : Dict ):
super().__init__(**__A )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
def _lowerCamelCase ( self : Optional[int] , __A : int=None , __A : List[str]=None , __A : Any="" , **__A : List[Any] ):
# Using "" as default argument because we're going to use `top_k=None` in user code to declare
# "No top_k"
__UpperCamelCase = tokenizer_kwargs
__UpperCamelCase = {}
if hasattr(self.model.config , 'return_all_scores' ) and return_all_scores is None:
__UpperCamelCase = self.model.config.return_all_scores
if isinstance(__A , __A ) or top_k is None:
__UpperCamelCase = top_k
__UpperCamelCase = False
elif return_all_scores is not None:
warnings.warn(
'`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of'
' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.' , __A , )
if return_all_scores:
__UpperCamelCase = None
else:
__UpperCamelCase = 1
if isinstance(__A , __A ):
__UpperCamelCase = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
__UpperCamelCase = function_to_apply
return preprocess_params, {}, postprocess_params
def __call__( self : Any , *__A : int , **__A : Tuple ):
__UpperCamelCase = super().__call__(*__A , **__A )
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
__UpperCamelCase = 'top_k' not in kwargs
if isinstance(args[0] , __A ) and _legacy:
# This pipeline is odd, and return a list when single item is run
return [result]
else:
return result
def _lowerCamelCase ( self : Union[str, Any] , __A : Optional[int] , **__A : str ):
__UpperCamelCase = self.framework
if isinstance(__A , __A ):
return self.tokenizer(**__A , return_tensors=__A , **__A )
elif isinstance(__A , __A ) and len(__A ) == 1 and isinstance(inputs[0] , __A ) and len(inputs[0] ) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=__A , **__A )
elif isinstance(__A , __A ):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
'The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a'
' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.' )
return self.tokenizer(__A , return_tensors=__A , **__A )
def _lowerCamelCase ( self : str , __A : Tuple ):
return self.model(**__A )
def _lowerCamelCase ( self : int , __A : Tuple , __A : Any=None , __A : int=1 , __A : Optional[int]=True ):
# `_legacy` is used to determine if we're running the naked pipeline and in backward
# compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
# the more natural result containing the list.
# Default value before `set_parameters`
if function_to_apply is None:
if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
__UpperCamelCase = ClassificationFunction.SIGMOID
elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
__UpperCamelCase = ClassificationFunction.SOFTMAX
elif hasattr(self.model.config , 'function_to_apply' ) and function_to_apply is None:
__UpperCamelCase = self.model.config.function_to_apply
else:
__UpperCamelCase = ClassificationFunction.NONE
__UpperCamelCase = model_outputs['logits'][0]
__UpperCamelCase = outputs.numpy()
if function_to_apply == ClassificationFunction.SIGMOID:
__UpperCamelCase = sigmoid(__A )
elif function_to_apply == ClassificationFunction.SOFTMAX:
__UpperCamelCase = softmax(__A )
elif function_to_apply == ClassificationFunction.NONE:
__UpperCamelCase = outputs
else:
raise ValueError(f'''Unrecognized `function_to_apply` argument: {function_to_apply}''' )
if top_k == 1 and _legacy:
return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}
__UpperCamelCase = [
{'label': self.model.config.idalabel[i], 'score': score.item()} for i, score in enumerate(__A )
]
if not _legacy:
dict_scores.sort(key=lambda __A : x["score"] , reverse=__A )
if top_k is not None:
__UpperCamelCase = dict_scores[:top_k]
return dict_scores
| 399 | 0 |
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
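# Tests for the ImageGPT image processor: pixels are color-quantized against a
# fixed cluster palette (a two-color toy palette here) and flattened into token
# ids, so (de)serialization must round-trip the cluster array as well.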
class ImageGPTImageProcessingTester(unittest.TestCase):
def __init__( self ,_snake_case ,_snake_case=7 ,_snake_case=3 ,_snake_case=18 ,_snake_case=30 ,_snake_case=4_00 ,_snake_case=True ,_snake_case=None ,_snake_case=True ,):
UpperCAmelCase_ : Optional[int] = size if size is not None else {"height": 18, "width": 18}
UpperCAmelCase_ : Optional[Any] = parent
UpperCAmelCase_ : Tuple = batch_size
UpperCAmelCase_ : Optional[Any] = num_channels
UpperCAmelCase_ : List[Any] = image_size
UpperCAmelCase_ : Optional[Any] = min_resolution
UpperCAmelCase_ : List[str] = max_resolution
UpperCAmelCase_ : List[str] = do_resize
UpperCAmelCase_ : Dict = size
UpperCAmelCase_ : List[str] = do_normalize
def UpperCamelCase__ ( self ):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
[-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = ImageGPTImageProcessingTester(self )
@property
def UpperCamelCase__ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_snake_case ,"clusters" ) )
self.assertTrue(hasattr(_snake_case ,"do_resize" ) )
self.assertTrue(hasattr(_snake_case ,"size" ) )
self.assertTrue(hasattr(_snake_case ,"do_normalize" ) )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{"height": 18, "width": 18} )
UpperCAmelCase_ : Any = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
self.assertEqual(image_processor.size ,{"height": 42, "width": 42} )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
UpperCAmelCase_ : int = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(_snake_case ,obj[key] ) )
else:
self.assertEqual(obj[key] ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[str] = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ : Dict = os.path.join(_snake_case ,"image_processor.json" )
image_processor_first.to_json_file(_snake_case )
UpperCAmelCase_ : Optional[Any] = self.image_processing_class.from_json_file(_snake_case ).to_dict()
UpperCAmelCase_ : Optional[Any] = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(_snake_case ,image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(_snake_case )
UpperCAmelCase_ : Dict = self.image_processing_class.from_pretrained(_snake_case ).to_dict()
UpperCAmelCase_ : int = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(_snake_case ,image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] ,_snake_case )
@unittest.skip("ImageGPT requires clusters at initialization" )
def UpperCamelCase__ ( self ):
pass
def prepare_images():
"""simple docstring"""
UpperCAmelCase_ : int = load_dataset("hf-internal-testing/fixtures_image_utils" , split="test" )
UpperCAmelCase_ : Any = Image.open(dataset[4]["file"] )
UpperCAmelCase_ : str = Image.open(dataset[5]["file"] )
UpperCAmelCase_ : str = [imagea, imagea]
return images
@require_vision
@require_torch
class _snake_case (unittest.TestCase):
@slow
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small" )
UpperCAmelCase_ : str = prepare_images()
# test non-batched
UpperCAmelCase_ : Tuple = image_processing(images[0] ,return_tensors="pt" )
self.assertIsInstance(encoding.input_ids ,torch.LongTensor )
self.assertEqual(encoding.input_ids.shape ,(1, 10_24) )
UpperCAmelCase_ : Any = [3_06, 1_91, 1_91]
self.assertEqual(encoding.input_ids[0, :3].tolist() ,_snake_case )
# test batched
UpperCAmelCase_ : List[Any] = image_processing(_snake_case ,return_tensors="pt" )
self.assertIsInstance(encoding.input_ids ,torch.LongTensor )
self.assertEqual(encoding.input_ids.shape ,(2, 10_24) )
UpperCAmelCase_ : Optional[Any] = [3_03, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() ,_snake_case )
| 323 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
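# Tests for the TF port of the ESM protein language model: a tiny config
# (hidden_size=32, two layers) exercises the base, masked-LM, and
# token-classification heads, plus slow integration checks against the real
# facebook/esm2_t6_8M_UR50D checkpoint.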
class TFEsmModelTester:
def __init__( self ,_snake_case ,):
UpperCAmelCase_ : Optional[int] = parent
UpperCAmelCase_ : List[str] = 13
UpperCAmelCase_ : str = 7
UpperCAmelCase_ : Dict = True
UpperCAmelCase_ : List[Any] = True
UpperCAmelCase_ : str = True
UpperCAmelCase_ : str = 99
UpperCAmelCase_ : Tuple = 32
UpperCAmelCase_ : int = 2
UpperCAmelCase_ : List[Any] = 4
UpperCAmelCase_ : List[Any] = 37
UpperCAmelCase_ : int = "gelu"
UpperCAmelCase_ : Any = 0.1
UpperCAmelCase_ : Optional[Any] = 0.1
UpperCAmelCase_ : List[Any] = 5_12
UpperCAmelCase_ : Optional[Any] = 16
UpperCAmelCase_ : List[Any] = 2
UpperCAmelCase_ : List[Any] = 0.02
UpperCAmelCase_ : List[str] = 3
UpperCAmelCase_ : Any = 4
UpperCAmelCase_ : Optional[int] = None
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase_ : Union[str, Any] = None
if self.use_input_mask:
UpperCAmelCase_ : Dict = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Optional[int] = None
if self.use_labels:
UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase_ : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
UpperCAmelCase_ : Dict = ids_tensor([self.batch_size] ,self.num_choices )
UpperCAmelCase_ : int = EsmConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,pad_token_id=1 ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,)
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self ):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ):
UpperCAmelCase_ : List[str] = TFEsmModel(config=_snake_case )
UpperCAmelCase_ : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask}
UpperCAmelCase_ : List[Any] = model(_snake_case )
UpperCAmelCase_ : Optional[int] = [input_ids, input_mask]
UpperCAmelCase_ : Optional[Any] = model(_snake_case )
UpperCAmelCase_ : str = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,):
UpperCAmelCase_ : Optional[Any] = True
UpperCAmelCase_ : List[Any] = TFEsmModel(config=_snake_case )
UpperCAmelCase_ : Dict = {
"input_ids": input_ids,
"attention_mask": input_mask,
"encoder_hidden_states": encoder_hidden_states,
"encoder_attention_mask": encoder_attention_mask,
}
UpperCAmelCase_ : Tuple = model(_snake_case )
UpperCAmelCase_ : Any = [input_ids, input_mask]
UpperCAmelCase_ : List[Any] = model(_snake_case ,encoder_hidden_states=_snake_case )
# Also check the case where encoder outputs are not passed
UpperCAmelCase_ : Any = model(_snake_case ,attention_mask=_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ):
UpperCAmelCase_ : Optional[Any] = TFEsmForMaskedLM(config=_snake_case )
UpperCAmelCase_ : Optional[Any] = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ):
UpperCAmelCase_ : List[str] = self.num_labels
UpperCAmelCase_ : List[str] = TFEsmForTokenClassification(config=_snake_case )
UpperCAmelCase_ : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask}
UpperCAmelCase_ : Dict = model(_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
__A : Optional[int] =(
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
__A : int =(
{
"feature-extraction": TFEsmModel,
"fill-mask": TFEsmForMaskedLM,
"text-classification": TFEsmForSequenceClassification,
"token-classification": TFEsmForTokenClassification,
"zero-shot": TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
__A : Tuple =False
__A : Optional[Any] =False
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = TFEsmModelTester(self )
UpperCAmelCase_ : Tuple = ConfigTester(self ,config_class=_snake_case ,hidden_size=37 )
def UpperCamelCase__ ( self ):
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_snake_case )
@slow
def UpperCamelCase__ ( self ):
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Any = TFEsmModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
@unittest.skip("Protein models do not support embedding resizing." )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("Protein models do not support embedding resizing." )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Any = model_class(_snake_case )
assert isinstance(model.get_input_embeddings() ,tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
UpperCAmelCase_ : int = model.get_bias()
assert isinstance(_snake_case ,_snake_case )
for k, v in name.items():
assert isinstance(_snake_case ,tf.Variable )
else:
UpperCAmelCase_ : int = model.get_output_embeddings()
assert x is None
UpperCAmelCase_ : int = model.get_bias()
assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
@slow
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D" )
UpperCAmelCase_ : Optional[Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCAmelCase_ : Optional[int] = model(_snake_case )[0]
UpperCAmelCase_ : str = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) ,_snake_case )
# compare the actual values for a slice.
UpperCAmelCase_ : Tuple = tf.constant(
[
[
[8.921518, -10.589814, -6.4671307],
[-6.3967156, -13.911377, -1.1211915],
[-7.781247, -13.951557, -3.740592],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1E-2 ) )
@slow
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Any = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D" )
UpperCAmelCase_ : Dict = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
UpperCAmelCase_ : Optional[Any] = model(_snake_case )[0]
# compare the actual values for a slice.
UpperCAmelCase_ : Tuple = tf.constant(
[
[
[0.14443092, 0.54125327, 0.3247739],
[0.30340484, 0.00526676, 0.31077722],
[0.32278043, -0.24987096, 0.3414628],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1E-4 ) )
| 323 | 1 |
"""simple docstring"""
import numpy as np
def UpperCAmelCase ( snake_case : np.ndarray ):
return 1 / (1 + np.exp(-vector ))
def UpperCAmelCase ( snake_case : np.ndarray ):
return vector * sigmoid(snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
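    # Illustrative values (not doctests from the original file):
    # sigmoid(np.array([-1.0, 1.0, 2.0]))             -> [0.26894142, 0.73105858, 0.88079708]
    # sigmoid_linear_unit(np.array([-1.0, 1.0, 2.0])) -> [-0.26894142, 0.73105858, 1.76159416]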
| 227 |
"""simple docstring"""
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('''>=''', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)
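# These helpers cover the three FSDP StateDictType strategies: FULL_STATE_DICT
# (rank 0 gathers one consolidated file), LOCAL_STATE_DICT (one flattened shard
# per rank), and SHARDED_STATE_DICT (a torch.distributed.checkpoint directory).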
def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)

    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")


def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()

    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)


def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")


def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # The check below should work but is currently disabled (mostly a
            # PyTorch issue), at the cost of excess memory usage:
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
| 227 | 1 |
from math import factorial, pi


def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    """Approximate sin(theta) with its Maclaurin series, truncated at `accuracy` terms."""
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")

    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    """Approximate cos(theta) with its Maclaurin series, truncated at `accuracy` terms."""
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")

    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(maclaurin_sin(10))
    print(maclaurin_sin(-10))
    print(maclaurin_sin(10, 15))
    print(maclaurin_sin(-10, 15))
    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(10, 15))
    print(maclaurin_cos(-10, 15))
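    # Because theta is first reduced mod 2*pi, 30 terms converge tightly:
    # maclaurin_sin(10) is sin(10) to many digits (about -0.5440211), and
    # maclaurin_cos(5) is cos(5) (about 0.2836622).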
| 164 |
from __future__ import annotations

from dataclasses import dataclass


@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree(root: TreeNode | None) -> bool:
    # Validation
    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True

        if not isinstance(node, TreeNode):
            return False

        try:
            float(node.data)
        except (TypeError, ValueError):
            return False

        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(root):
        raise ValueError(
            "Each node should be type of TreeNode and data should be float."
        )

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        if node is None:
            return True

        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(root, -float("inf"), float("inf"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
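    # Illustrative usage (assumed trees, not from the original file):
    valid = TreeNode(2.1, TreeNode(2.0), TreeNode(2.2))
    invalid = TreeNode(2.0, TreeNode(2.0))  # an equal child violates the strict ordering
    assert is_binary_search_tree(valid)
    assert not is_binary_search_tree(invalid)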
| 164 | 1 |
import flax.linen as nn
import jax.numpy as jnp

from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D


class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states
class lowerCAmelCase_ ( nn.Module ):
__lowerCamelCase : int
__lowerCamelCase : int
__lowerCamelCase : float = 0.0
__lowerCamelCase : int = 1
__lowerCamelCase : bool = True
__lowerCamelCase : jnp.dtype = jnp.floataa
def _snake_case ( self ) -> List[Any]:
_lowerCAmelCase = []
for i in range(self.num_layers ):
_lowerCAmelCase = self.in_channels if i == 0 else self.out_channels
_lowerCAmelCase = FlaxResnetBlockaD(
in_channels=_lowerCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_lowerCAmelCase )
_lowerCAmelCase = resnets
if self.add_downsample:
_lowerCAmelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=True ) -> str:
_lowerCAmelCase = ()
for resnet in self.resnets:
_lowerCAmelCase = resnet(_lowerCAmelCase , _lowerCAmelCase , deterministic=_lowerCAmelCase )
output_states += (hidden_states,)
if self.add_downsample:
_lowerCAmelCase = self.downsamplers_a(_lowerCAmelCase )
output_states += (hidden_states,)
return hidden_states, output_states
class lowerCAmelCase_ ( nn.Module ):
__lowerCamelCase : int
__lowerCamelCase : int
__lowerCamelCase : int
__lowerCamelCase : float = 0.0
__lowerCamelCase : int = 1
__lowerCamelCase : int = 1
__lowerCamelCase : bool = True
__lowerCamelCase : bool = False
__lowerCamelCase : bool = False
__lowerCamelCase : bool = False
__lowerCamelCase : jnp.dtype = jnp.floataa
def _snake_case ( self ) -> int:
_lowerCAmelCase = []
_lowerCAmelCase = []
for i in range(self.num_layers ):
_lowerCAmelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels
_lowerCAmelCase = self.prev_output_channel if i == 0 else self.out_channels
_lowerCAmelCase = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_lowerCAmelCase )
_lowerCAmelCase = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(_lowerCAmelCase )
_lowerCAmelCase = resnets
_lowerCAmelCase = attentions
if self.add_upsample:
_lowerCAmelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=True ) -> Any:
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
_lowerCAmelCase = res_hidden_states_tuple[-1]
_lowerCAmelCase = res_hidden_states_tuple[:-1]
_lowerCAmelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
_lowerCAmelCase = resnet(_lowerCAmelCase , _lowerCAmelCase , deterministic=_lowerCAmelCase )
_lowerCAmelCase = attn(_lowerCAmelCase , _lowerCAmelCase , deterministic=_lowerCAmelCase )
if self.add_upsample:
_lowerCAmelCase = self.upsamplers_a(_lowerCAmelCase )
return hidden_states
class lowerCAmelCase_ ( nn.Module ):
__lowerCamelCase : int
__lowerCamelCase : int
__lowerCamelCase : int
__lowerCamelCase : float = 0.0
__lowerCamelCase : int = 1
__lowerCamelCase : bool = True
__lowerCamelCase : jnp.dtype = jnp.floataa
def _snake_case ( self ) -> Optional[Any]:
_lowerCAmelCase = []
for i in range(self.num_layers ):
_lowerCAmelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels
_lowerCAmelCase = self.prev_output_channel if i == 0 else self.out_channels
_lowerCAmelCase = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_lowerCAmelCase )
_lowerCAmelCase = resnets
if self.add_upsample:
_lowerCAmelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=True ) -> Optional[Any]:
for resnet in self.resnets:
# pop res hidden states
_lowerCAmelCase = res_hidden_states_tuple[-1]
_lowerCAmelCase = res_hidden_states_tuple[:-1]
_lowerCAmelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
_lowerCAmelCase = resnet(_lowerCAmelCase , _lowerCAmelCase , deterministic=_lowerCAmelCase )
if self.add_upsample:
_lowerCAmelCase = self.upsamplers_a(_lowerCAmelCase )
return hidden_states
class lowerCAmelCase_ ( nn.Module ):
__lowerCamelCase : int
__lowerCamelCase : float = 0.0
__lowerCamelCase : int = 1
__lowerCamelCase : int = 1
__lowerCamelCase : bool = False
__lowerCamelCase : bool = False
__lowerCamelCase : jnp.dtype = jnp.floataa
def _snake_case ( self ) -> str:
# there is always at least one resnet
_lowerCAmelCase = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
_lowerCAmelCase = []
for _ in range(self.num_layers ):
_lowerCAmelCase = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(_lowerCAmelCase )
_lowerCAmelCase = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_lowerCAmelCase )
_lowerCAmelCase = resnets
_lowerCAmelCase = attentions
def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=True ) -> List[str]:
_lowerCAmelCase = self.resnets[0](_lowerCAmelCase , _lowerCAmelCase )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
_lowerCAmelCase = attn(_lowerCAmelCase , _lowerCAmelCase , deterministic=_lowerCAmelCase )
_lowerCAmelCase = resnet(_lowerCAmelCase , _lowerCAmelCase , deterministic=_lowerCAmelCase )
return hidden_states
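# Added note: the down blocks above return `output_states`, a tuple of per-layer
# activations; the matching up blocks pop those skips from
# `res_hidden_states_tuple` and concatenate them on the channel axis before each
# resnet -- the standard UNet skip-connection wiring, expressed in Flax.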
| 18 |
from __future__ import annotations

import csv

import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", class_="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])


if __name__ == "__main__":
    write_movies()
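    # Added note: this scraper depends on IMDb's current HTML layout; if the
    # chart page markup changes, find_all("td", class_=...) may return empty
    # lists and the comprehension above will silently produce an empty dict.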
| 590 | 0 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
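# Minimal wiring sketch (assumed usage, not from the original file):
#   trainer = pl.Trainer(
#       callbacks=[
#           Seq2SeqLoggingCallback(),
#           get_checkpoint_callback(output_dir, metric="bleu"),
#           get_early_stopping_callback(metric="bleu", patience=3),
#       ],
#   )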
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)
@rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)
@rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})
@rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")
@rank_zero_only
def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> str:
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path)
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid") | 711 |
'''simple docstring'''
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}
    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
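    # Sketch of what the common tester mixin does with the dict above (illustrative):
    #   init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
    #   model = VQModel(**init_dict)
    #   sample = model(**inputs_dict).sample  # same (4, 3, 32, 32) shape as the input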
    def test_forward_signature(self):
        pass

    def test_training(self):
        pass
    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"
    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3)) | 61 | 0 |
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
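# Added note: both checks are lint-style guards over ./datasets/**/*.py --
# scripts must pass an explicit encoding to open() and must not call print()
# (the regexes deliberately skip comments, string literals, and binary modes).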
| 59 |
import numpy as np


def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    # tanh(x) written in its logistic form: (2 / (1 + e^(-2x))) - 1
    return (2 / (1 + np.exp(-2 * vector))) - 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
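    # Example (added): tanh saturates toward +/-1 for large |x|.
    # >>> tangent_hyperbolic(np.array([-2.0, 0.0, 2.0]))
    # array([-0.96402758,  0.        ,  0.96402758])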
| 27 | 0 |
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
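# Example (added): three points on the line x = y = z are collinear, since
# AB x AC is the zero vector.
# >>> are_collinear((0.0, 0.0, 0.0), (1.0, 1.0, 1.0), (2.0, 2.0, 2.0))
# True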
| 144 |
from __future__ import annotations


def two_pointer(nums: list[int], target: int) -> list[int]:
    i = 0
    j = len(nums) - 1

    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1

    return []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{two_pointer([2, 7, 11, 15], 9) = }")
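    # Added note: the two-pointer scan above assumes `nums` is sorted ascending;
    # for unsorted input, a single-pass dict of value -> index is the usual variant.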
| 144 | 1 |
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
UpperCAmelCase : Tuple = pd.read_csv(
"https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
"position_salaries.csv"
)
UpperCAmelCase : Optional[int] = dataset.iloc[:, 1:2].values
UpperCAmelCase : List[str] = dataset.iloc[:, 2].values
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Tuple = train_test_split(X, y, test_size=0.2, random_state=0)
UpperCAmelCase : List[str] = PolynomialFeatures(degree=4)
UpperCAmelCase : Tuple = poly_reg.fit_transform(X)
UpperCAmelCase : Any = LinearRegression()
pol_reg.fit(X_poly, y)
def _SCREAMING_SNAKE_CASE () -> str:
'''simple docstring'''
plt.scatter(UpperCamelCase__ , UpperCamelCase__ , color="""red""" )
plt.plot(UpperCamelCase__ , pol_reg.predict(poly_reg.fit_transform(UpperCamelCase__ ) ) , color="""blue""" )
plt.title("""Truth or Bluff (Linear Regression)""" )
plt.xlabel("""Position level""" )
plt.ylabel("""Salary""" )
plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
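    # Added note: the train/test split above is vestigial -- the regression is
    # fit on the full X/y, so X_train/X_test/y_train/y_test are never used.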
| 567 |
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1114112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}

# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer(PreTrainedTokenizer):
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)
@property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size
    def _tokenize(self, text: str) -> List[str]:
        return list(text)
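    # Added note: CANINE tokenizes at the character level -- _tokenize is just
    # list(text), and each id is simply the character's Unicode codepoint.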
    def _convert_token_to_id(self, token: str) -> int:
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")
    def _convert_id_to_token(self, index: int) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")
    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        # CANINE has no vocabulary file to write to disk.
return () | 6 | 0 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class A__ ( unittest.TestCase ):
def a__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = 0
def a__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = AutoImageProcessor.from_pretrained('openai/clip-vit-base-patch32' )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
def a__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
__lowercase = Path(_UpperCAmelCase ) / 'preprocessor_config.json'
__lowercase = Path(_UpperCAmelCase ) / 'config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(_UpperCAmelCase , 'w' ) , )
json.dump({'model_type': 'clip'} , open(_UpperCAmelCase , 'w' ) )
__lowercase = AutoImageProcessor.from_pretrained(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
def a__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
__lowercase = Path(_UpperCAmelCase ) / 'preprocessor_config.json'
__lowercase = Path(_UpperCAmelCase ) / 'config.json'
json.dump(
{'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'} , open(_UpperCAmelCase , 'w' ) , )
json.dump({'model_type': 'clip'} , open(_UpperCAmelCase , 'w' ) )
__lowercase = AutoImageProcessor.from_pretrained(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
def a__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
__lowercase = CLIPConfig()
# Create a dummy config file with image_proceesor_type
__lowercase = Path(_UpperCAmelCase ) / 'preprocessor_config.json'
__lowercase = Path(_UpperCAmelCase ) / 'config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(_UpperCAmelCase , 'w' ) , )
json.dump({'model_type': 'clip'} , open(_UpperCAmelCase , 'w' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
__lowercase = AutoImageProcessor.from_pretrained(_UpperCAmelCase ).to_dict()
config_dict.pop('image_processor_type' )
__lowercase = CLIPImageProcessor(**_UpperCAmelCase )
# save in new folder
model_config.save_pretrained(_UpperCAmelCase )
config.save_pretrained(_UpperCAmelCase )
__lowercase = AutoImageProcessor.from_pretrained(_UpperCAmelCase )
# make sure private variable is not incorrectly saved
__lowercase = json.loads(config.to_json_string() )
self.assertTrue('_processor_class' not in dict_as_saved )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
def a__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
__lowercase = Path(_UpperCAmelCase ) / 'preprocessor_config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(_UpperCAmelCase , 'w' ) , )
__lowercase = AutoImageProcessor.from_pretrained(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
def a__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
with self.assertRaisesRegex(
_UpperCAmelCase , 'clip-base is not a local folder and is not a valid model identifier' ):
__lowercase = AutoImageProcessor.from_pretrained('clip-base' )
def a__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
with self.assertRaisesRegex(
_UpperCAmelCase , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
__lowercase = AutoImageProcessor.from_pretrained(_UpperCAmelCase , revision='aaaaaa' )
def a__ ( self : int ) -> str:
"""simple docstring"""
with self.assertRaisesRegex(
_UpperCAmelCase , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
__lowercase = AutoImageProcessor.from_pretrained('hf-internal-testing/config-no-model' )
def a__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
with self.assertRaises(_UpperCAmelCase ):
__lowercase = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_UpperCAmelCase ):
__lowercase = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=_UpperCAmelCase )
__lowercase = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=_UpperCAmelCase )
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_UpperCAmelCase )
__lowercase = AutoImageProcessor.from_pretrained(_UpperCAmelCase , trust_remote_code=_UpperCAmelCase )
self.assertEqual(reloaded_image_processor.__class__.__name__ , 'NewImageProcessor' )
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
try:
AutoConfig.register('custom' , _UpperCAmelCase )
AutoImageProcessor.register(_UpperCAmelCase , _UpperCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_UpperCAmelCase ):
AutoImageProcessor.register(_UpperCAmelCase , _UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowercase = Path(_UpperCAmelCase ) / 'preprocessor_config.json'
__lowercase = Path(_UpperCAmelCase ) / 'config.json'
json.dump(
{'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'} , open(_UpperCAmelCase , 'w' ) , )
json.dump({'model_type': 'clip'} , open(_UpperCAmelCase , 'w' ) )
__lowercase = CustomImageProcessor.from_pretrained(_UpperCAmelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_UpperCAmelCase )
__lowercase = AutoImageProcessor.from_pretrained(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def a__ ( self : Optional[int] ) -> int:
"""simple docstring"""
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Dict = True
try:
AutoConfig.register('custom' , _UpperCAmelCase )
AutoImageProcessor.register(_UpperCAmelCase , _UpperCAmelCase )
# If remote code is not set, the default is to use local
__lowercase = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor' )
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
__lowercase = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=_UpperCAmelCase )
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
__lowercase = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=_UpperCAmelCase )
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
self.assertTrue(not hasattr(_UpperCAmelCase , 'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
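# Added note: the register-then-try/finally pattern above restores the global
# CONFIG_MAPPING and IMAGE_PROCESSOR_MAPPING after each test, so the custom
# classes never leak into other tests.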
| 701 |
from pathlib import Path

import numpy as np
from PIL import Image


def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )

    # Copy image into the padded array (offset reconstructed; anchors a 3x3 kernel)
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image

    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output


if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))

    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)

    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
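    # Tiny worked example (added; relies on the 3x3 padding offset above):
    # >>> img = np.zeros((3, 3), dtype=int); img[1, 1] = 1
    # >>> dilation(img, np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]))
    # array([[0, 1, 0],
    #        [1, 1, 1],
    #        [0, 1, 0]])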
| 688 | 0 |
deps = {  # pinned dependency version table (identifier reconstructed)
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
| 481 |
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether ot not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization."
                "The training dataset will be truncated in block of this size for training."
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set world whole masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, ref_path=ref_path
                )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            "Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
            "or remove the --do_eval argument."
        )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , lowerCamelCase__ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
" script, save it,and load it from here, using --tokenizer_name" )
    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
"--mlm flag (masked language modeling)." )
if data_args.block_size <= 0:
lowercase__ : Optional[int] = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
lowercase__ : Optional[int] = min(data_args.block_size , tokenizer.max_len )
    # Get datasets

    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
if config.model_type == "xlnet":
lowercase__ : Union[str, Any] = DataCollatorForPermutationLanguageModeling(
tokenizer=lowerCamelCase__ , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
lowercase__ : Optional[Any] = DataCollatorForWholeWordMask(
tokenizer=lowerCamelCase__ , mlm_probability=data_args.mlm_probability )
else:
lowercase__ : str = DataCollatorForLanguageModeling(
tokenizer=lowerCamelCase__ , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )
# Training
if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))

        results.update(result)
return results
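# Illustrative CLI (added; flag names come from the dataclasses above):
#   python run_language_modeling.py --model_name_or_path gpt2 \
#       --train_data_file train.txt --do_train --output_dir ./out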
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 496 | 0 |
import string


def decrypt(message: str) -> None:
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
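    # Example (added): for input "KHOOR", the loop prints 26 candidates,
    # including "Decryption using Key #3: HELLO".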
| 499 |
__all__ = [
    "Audio",
    "Array2D",
    "Array3D",
    "Array4D",
    "Array5D",
    "ClassLabel",
    "Features",
    "Sequence",
    "Value",
    "Image",
    "Translation",
    "TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 499 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = KandinskyVaaControlnetPipeline
lowerCamelCase__ = ['''image_embeds''', '''negative_image_embeds''', '''hint''']
lowerCamelCase__ = ['''image_embeds''', '''negative_image_embeds''', '''hint''']
lowerCamelCase__ = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
lowerCamelCase__ = False
@property
def __UpperCamelCase ( self ):
return 3_2
@property
def __UpperCamelCase ( self ):
return 3_2
@property
def __UpperCamelCase ( self ):
return self.time_input_dim
@property
def __UpperCamelCase ( self ):
return self.time_input_dim * 4
@property
def __UpperCamelCase ( self ):
return 1_0_0
@property
def __UpperCamelCase ( self ):
torch.manual_seed(0 )
snake_case__ : List[Any] = {
"""in_channels""": 8,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
snake_case__ : int = UNetaDConditionModel(**__SCREAMING_SNAKE_CASE )
return model
@property
def __UpperCamelCase ( self ):
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def __UpperCamelCase ( self ):
torch.manual_seed(0 )
snake_case__ : Any = VQModel(**self.dummy_movq_kwargs )
return model
def __UpperCamelCase ( self ):
snake_case__ : str = self.dummy_unet
snake_case__ : Optional[int] = self.dummy_movq
snake_case__ : Dict = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule="""linear""" , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=__SCREAMING_SNAKE_CASE , set_alpha_to_one=__SCREAMING_SNAKE_CASE , steps_offset=1 , prediction_type="""epsilon""" , thresholding=__SCREAMING_SNAKE_CASE , )
snake_case__ : Optional[Any] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ):
snake_case__ : Dict = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__SCREAMING_SNAKE_CASE )
# create hint
snake_case__ : Optional[int] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
snake_case__ : Optional[int] = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
snake_case__ : List[str] = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = {
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 6_4,
"""width""": 6_4,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = """cpu"""
snake_case__ : Any = self.get_dummy_components()
snake_case__ : Optional[Any] = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = pipe(**self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) )
snake_case__ : int = output.images
snake_case__ : int = pipe(
**self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) , return_dict=__SCREAMING_SNAKE_CASE , )[0]
snake_case__ : Union[str, Any] = image[0, -3:, -3:, -1]
snake_case__ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case__ : Tuple = np.array(
[0.695_9826, 0.86_8279, 0.755_8092, 0.6876_9467, 0.8580_5804, 0.6597_7496, 0.4488_5302, 0.595_9111, 0.425_1595] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" )
snake_case__ : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
snake_case__ : List[Any] = torch.from_numpy(np.array(__SCREAMING_SNAKE_CASE ) ).float() / 255.0
snake_case__ : Dict = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
snake_case__ : Optional[Any] = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(__SCREAMING_SNAKE_CASE )
snake_case__ : int = KandinskyVaaControlnetPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" , torch_dtype=torch.floataa )
snake_case__ : List[Any] = pipeline.to(__SCREAMING_SNAKE_CASE )
pipeline.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = """A robot, 4k photo"""
snake_case__ : int = torch.Generator(device="""cuda""" ).manual_seed(0 )
snake_case__ , snake_case__ : str = pipe_prior(
__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
snake_case__ : Any = torch.Generator(device="""cuda""" ).manual_seed(0 )
snake_case__ : List[Any] = pipeline(
image_embeds=__SCREAMING_SNAKE_CASE , negative_image_embeds=__SCREAMING_SNAKE_CASE , hint=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=1_0_0 , output_type="""np""" , )
snake_case__ : List[str] = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
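# Added note: Kandinsky 2.2 generates in two stages -- the prior pipeline maps
# the text prompt to CLIP image embeddings, and the controlnet decoder turns
# those embeddings plus the depth hint into the final image.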
| 38 |

def encrypt(input_string: str, key: int) -> str:
    """Shuffles the characters of ``input_string`` along a zigzag grid of ``key`` rails."""
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string


def decrypt(input_string: str, key: int) -> str:
    """Rebuilds the zigzag grid and reads the characters back in their original order."""
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Runs decrypt with every possible key and returns the resulting candidates."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
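
# Round-trip sanity check for the rail-fence functions above (added example,
# not part of the original module):
assert decrypt(encrypt("HELLO WORLD", 4), 4) == "HELLO WORLD"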
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 613 | 0 |
def join(separator: str, separated: list[str]) -> str:
    """Joins the strings in ``separated`` with ``separator``, trimming a trailing separator."""
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)
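
# Illustrative usage of join() (added example, not in the original module):
assert join("-", ["a", "b", "c"]) == "a-b-c"
assert join(" ", ["You", "are", "amazing!"]) == "You are amazing!"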
if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 380 |
print((lambda quine: quine % quine)('print((lambda quine: quine %% quine)(%r))'))
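
# How the quine above works (explanatory note, added): the lambda receives its
# own source as a string and interpolates that string into itself via %r, so the
# printed text is exactly the program's source. A two-line quine built on the
# same trick (illustrative):
#   s = 's = %r\nprint(s %% s)'
#   print(s % s)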
| 380 | 1 |
from ..utils import DummyObject, requires_backends
class UpperCAmelCase_ ( metaclass=DummyObject ):
'''simple docstring'''
A : Union[str, Any] = ['torch', 'transformers', 'onnx']
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Tuple:
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> List[Any]:
requires_backends(cls , ["torch", "transformers", "onnx"] )
class UpperCAmelCase_ ( metaclass=DummyObject ):
'''simple docstring'''
A : List[str] = ['torch', 'transformers', 'onnx']
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> str:
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> int:
requires_backends(cls , ["torch", "transformers", "onnx"] )
class UpperCAmelCase_ ( metaclass=DummyObject ):
'''simple docstring'''
A : Union[str, Any] = ['torch', 'transformers', 'onnx']
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> int:
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> List[Any]:
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> str:
requires_backends(cls , ["torch", "transformers", "onnx"] )
class UpperCAmelCase_ ( metaclass=DummyObject ):
'''simple docstring'''
A : Dict = ['torch', 'transformers', 'onnx']
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Dict:
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> str:
requires_backends(cls , ["torch", "transformers", "onnx"] )
class UpperCAmelCase_ ( metaclass=DummyObject ):
'''simple docstring'''
A : Optional[Any] = ['torch', 'transformers', 'onnx']
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> int:
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> str:
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> List[Any]:
requires_backends(cls , ["torch", "transformers", "onnx"] )
class UpperCAmelCase_ ( metaclass=DummyObject ):
'''simple docstring'''
A : List[str] = ['torch', 'transformers', 'onnx']
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Any:
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Dict:
requires_backends(cls , ["torch", "transformers", "onnx"] )
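
# Note (added): these classes all follow the dummy-object pattern from
# diffusers' utils. `DummyObject` and `requires_backends` make instantiation or
# any `from_config`/`from_pretrained` call raise an ImportError naming the
# missing backends (torch, transformers, onnx), e.g. (illustrative):
#   try:
#       SomeOnnxPipelineDummy()  # hypothetical dummy class name
#   except ImportError as err:
#       print(err)  # tells the user which backends to install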
| 568 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
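
# Hypothetical usage sketch (added; assumes the transformers agents/tools
# runtime is installed and a local audio file exists):
#   tool = SpeechToTextTool()
#   transcript = tool("path/to/audio.wav")  # returns the transcribed text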
| 568 | 1 |
'''simple docstring'''
from ...utils import OptionalDependencyNotAvailable, is_note_seq_available, is_torch_available, is_transformers_available
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
        T5FilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 706 |
'''Sums all numbers below one million that equal the sum of the fifth powers of their digits.'''

DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    """Returns the sum of the fifth powers of the digits of ``number``."""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    """Returns the sum of all numbers that are fixed points of digits_fifth_powers_sum."""
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
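
# Worked example (added): 4150 = 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0,
# so it is a fixed point of digits_fifth_powers_sum.
assert digits_fifth_powers_sum(4150) == 4150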
| 646 | 0 |
"""simple docstring"""
def _lowerCamelCase ( __a ):
SCREAMING_SNAKE_CASE_ = [1]
SCREAMING_SNAKE_CASE_ = 0, 0, 0
SCREAMING_SNAKE_CASE_ = ugly_nums[ia] * 2
SCREAMING_SNAKE_CASE_ = ugly_nums[ia] * 3
SCREAMING_SNAKE_CASE_ = ugly_nums[ia] * 5
for _ in range(1, lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE_ = min(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ )
ugly_nums.append(lowerCAmelCase_ )
if next_num == next_a:
ia += 1
SCREAMING_SNAKE_CASE_ = ugly_nums[ia] * 2
if next_num == next_a:
ia += 1
SCREAMING_SNAKE_CASE_ = ugly_nums[ia] * 3
if next_num == next_a:
ia += 1
SCREAMING_SNAKE_CASE_ = ugly_nums[ia] * 5
return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f'''{ugly_numbers(200) = }''') | 626 |
from math import factorial


def solution(num: int = 100) -> int:
    """Returns the sum of the digits of num! (by default, of 100!)."""
    return sum(map(int, str(factorial(num))))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
| 149 | 0 |
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """
        Initialize with a list of the number of items in each set
        and with rank = 1 for each set.
        """
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Merge two sets together using the union-by-rank heuristic; returns True if a merge happened."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the parent of a given set (with path compression)."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
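
# Illustrative usage (added): three singleton sets; merging two of them makes
# the largest set size 2, and merging the same pair again is a no-op.
ds = DisjointSet([1, 1, 1])
assert ds.merge(1, 2) is True
assert ds.merge(1, 2) is False
assert ds.max_set == 2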
| 469 |

import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/nllb-200-distilled-600M''': 1024,
}
# fmt: off
__magic_name__ =['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class NllbTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="<unk>" , SCREAMING_SNAKE_CASE_="<pad>" , SCREAMING_SNAKE_CASE_="<mask>" , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_ , ) -> str:
'''simple docstring'''
UpperCamelCase__ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token
UpperCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
UpperCamelCase__ = legacy_behaviour
super().__init__(
bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , src_lang=SCREAMING_SNAKE_CASE_ , tgt_lang=SCREAMING_SNAKE_CASE_ , additional_special_tokens=SCREAMING_SNAKE_CASE_ , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
UpperCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase__ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
UpperCamelCase__ = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCamelCase__ = 1
UpperCamelCase__ = len(self.sp_model )
UpperCamelCase__ = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(SCREAMING_SNAKE_CASE_ )
}
UpperCamelCase__ = {v: k for k, v in self.lang_code_to_id.items()}
UpperCamelCase__ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
UpperCamelCase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
UpperCamelCase__ = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
UpperCamelCase__ = src_lang if src_lang is not None else '''eng_Latn'''
UpperCamelCase__ = self.lang_code_to_id[self._src_lang]
UpperCamelCase__ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__(self ) -> Any:
'''simple docstring'''
UpperCamelCase__ = self.__dict__.copy()
UpperCamelCase__ = None
UpperCamelCase__ = self.sp_model.serialized_model_proto()
return state
def __setstate__(self , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
UpperCamelCase__ = {}
UpperCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def _a (self ) -> Tuple:
'''simple docstring'''
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def _a (self ) -> str:
'''simple docstring'''
return self._src_lang
@src_lang.setter
def _a (self , SCREAMING_SNAKE_CASE_ ) -> None:
'''simple docstring'''
UpperCamelCase__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE_ , token_ids_a=SCREAMING_SNAKE_CASE_ , already_has_special_tokens=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = [1] * len(self.prefix_tokens )
UpperCamelCase__ = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(SCREAMING_SNAKE_CASE_ )) + suffix_ones
return prefix_ones + ([0] * len(SCREAMING_SNAKE_CASE_ )) + ([0] * len(SCREAMING_SNAKE_CASE_ )) + suffix_ones
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
'''simple docstring'''
UpperCamelCase__ = [self.sep_token_id]
UpperCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> List[Any]:
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
UpperCamelCase__ = src_lang
UpperCamelCase__ = self(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tgt_lang_id
return inputs
def _a (self ) -> str:
'''simple docstring'''
UpperCamelCase__ = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _a (self , SCREAMING_SNAKE_CASE_ ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(SCREAMING_SNAKE_CASE_ , out_type=SCREAMING_SNAKE_CASE_ )
def _a (self , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCamelCase__ = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _a (self , SCREAMING_SNAKE_CASE_ ) -> str:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _a (self , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = ''''''.join(SCREAMING_SNAKE_CASE_ ).replace(SCREAMING_SNAKE_CASE_ , ''' ''' ).strip()
return out_string
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCamelCase__ = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE_ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE_ , '''wb''' ) as fi:
UpperCamelCase__ = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE_ )
return (out_vocab_file,)
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = "eng_Latn" , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "fra_Latn" , **SCREAMING_SNAKE_CASE_ , ) -> BatchEncoding:
'''simple docstring'''
UpperCamelCase__ = src_lang
UpperCamelCase__ = tgt_lang
        return super().prepare_seq2seq_batch(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def _a (self ) -> Union[str, Any]:
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def _a (self ) -> Dict:
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _a (self , SCREAMING_SNAKE_CASE_ ) -> None:
'''simple docstring'''
UpperCamelCase__ = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
UpperCamelCase__ = []
UpperCamelCase__ = [self.eos_token_id, self.cur_lang_code]
else:
UpperCamelCase__ = [self.cur_lang_code]
UpperCamelCase__ = [self.eos_token_id]
def _a (self , SCREAMING_SNAKE_CASE_ ) -> None:
'''simple docstring'''
UpperCamelCase__ = self.lang_code_to_id[lang]
if self.legacy_behaviour:
UpperCamelCase__ = []
UpperCamelCase__ = [self.eos_token_id, self.cur_lang_code]
else:
UpperCamelCase__ = [self.cur_lang_code]
UpperCamelCase__ = [self.eos_token_id]
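
# Hypothetical usage sketch (added; requires the pretrained sentencepiece model
# and network access):
#   tokenizer = NllbTokenizer.from_pretrained(
#       "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#   )
#   batch = tokenizer("Hello world", return_tensors="pt")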
| 469 | 1 |
from __future__ import annotations


def kmp(pattern: str, text: str) -> bool:
    """
    The Knuth-Morris-Pratt algorithm: reports whether ``pattern`` occurs in
    ``text`` in O(len(text) + len(pattern)) time.
    """
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """For each prefix of ``pattern``, the length of the longest proper prefix that is also a suffix."""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 612 |
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3
    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
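
# Sketch of the sampling loop these tests exercise (added; diffusers API, with
# `model` standing in for a real denoiser):
#   scheduler = KDPM2DiscreteScheduler(num_train_timesteps=1100)
#   scheduler.set_timesteps(num_inference_steps)
#   sample = initial_noise * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = model(model_input, t)
#       sample = scheduler.step(noise_pred, t, sample).prev_sample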
| 612 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
    "tokenization_perceiver": ["PerceiverTokenizer"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ["""PerceiverFeatureExtractor"""]
_lowercase = ["""PerceiverImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_perceiver"] = [
"""PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PerceiverForImageClassificationConvProcessing""",
"""PerceiverForImageClassificationFourier""",
"""PerceiverForImageClassificationLearned""",
"""PerceiverForMaskedLM""",
"""PerceiverForMultimodalAutoencoding""",
"""PerceiverForOpticalFlow""",
"""PerceiverForSequenceClassification""",
"""PerceiverLayer""",
"""PerceiverModel""",
"""PerceiverPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 427 |
'''Utility that sorts the imports in the custom inits of Transformers.'''

import argparse
import os
import re


PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line):
    """Returns the indent in ``line``."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def lowerCamelCase__ ( a , a="" , a=None , a=None ):
__snake_case = 0
__snake_case = code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(a ):
index += 1
__snake_case = ['\n'.join(lines[:index] )]
else:
__snake_case = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
__snake_case = [lines[index]]
index += 1
while index < len(a ) and (end_prompt is None or not lines[index].startswith(a )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(a ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(a ) )
if index < len(a ) - 1:
__snake_case = [lines[index + 1]]
index += 1
else:
__snake_case = []
else:
blocks.append('\n'.join(a ) )
__snake_case = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(a ) > 0:
blocks.append('\n'.join(a ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(a ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
def ignore_underscore(key):
    """Wraps a key function so that comparisons are lowercase and ignore underscores."""

    def _inner(x):
        return key(x).lower().replace('_', '')

    return _inner
def sort_objects(objects, key=None):
    """Sort a list of objects: constants first, then classes, then functions/variables."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
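
# For example (added, illustrative): sort_objects(["bar", "Foo", "BAZ"])
# returns ["BAZ", "Foo", "bar"] -- constants first, then classes, then functions.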
def sort_objects_in_import(import_statement):
    """Sorts the imports in a single import statement."""

    # This inner function sort imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', '') for part in imports.split(',')]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split('\n')
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == '[' else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', '') for part in lines[1].split(',')]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ', '.join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file, check_only=True):
    """Sort (or just check) the imports defined in the `_import_structure` of a given init."""
    with open(file, encoding='utf-8') as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt='_import_structure = {', end_prompt='if TYPE_CHECKING:'
    )

    # We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split('\n')

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = '\n'.join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if '_import_structure = {' in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = '\n'.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f'Overwriting {file}.')
            with open(file, 'w', encoding='utf-8') as f:
                f.write('\n'.join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """Sorts (or checks) the imports in all custom inits under PATH_TO_TRANSFORMERS."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
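
# Illustrative invocation (added; the file name is an assumption -- in the
# transformers repo this utility lives at utils/custom_init_isort.py):
#   python custom_init_isort.py               # rewrites inits in place
#   python custom_init_isort.py --check_only  # only reports problems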
| 427 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
'''simple docstring'''
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
        hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=40, eos_token_id=2, pad_token_id=1, bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
'''simple docstring'''
    src_text = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
    expected_text = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = "google/pegasus-xsum"
@cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)
@cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model
    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words
    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words
@slow
    def test_batch_generation(self):
self._assert_generated_batch_equal_expected()
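
# Sketch of what the integration test above drives (added; illustrative only):
#   tok = AutoTokenizer.from_pretrained("google/pegasus-xsum")
#   model = TFAutoModelForSeq2SeqLM.from_pretrained("google/pegasus-xsum")
#   batch = tok(src_text, padding=True, return_tensors="tf")
#   ids = model.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)
#   summaries = tok.batch_decode(ids.numpy(), skip_special_tokens=True)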
| 517 |
'''Utility that checks the two halves of the custom inits (import structure and TYPE_CHECKING) define the same objects.'''

import collections
import importlib.util
import os
import re
from pathlib import Path


PATH_TO_TRANSFORMERS = "src/transformers"


# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init_file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING` objects
    defined.
    """
    with open(init_file, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith('_import_structure = {'):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith('if TYPE_CHECKING') and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r'\[([^\]]+)\]', content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(', ')])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            objects.extend([obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ') if len(obj) > 0])
        elif line.startswith(' ' * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {'none': objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith('if TYPE_CHECKING'):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(' ' * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(', ')
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(', ')
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(' ' * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(' ' * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith('else')
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(', '))
        elif line.startswith(' ' * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {'none': objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(' ' * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(', '))
                elif line.startswith(' ' * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """
    Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init.
    """

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f'Duplicate _import_structure definitions for: {duplicate_imports}')
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}')

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = 'base imports' if key == 'none' else f'{key} backend'
            errors.append(f'Differences for {name}:')
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f'  {a} in TYPE_HINT but not in _import_structure.')
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f'  {a} in _import_structure but not in TYPE_HINT.')
    return errors
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 597 | 0 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class Swin2SRImageProcessor(BaseImageProcessor):
    r"""Image processor that rescales pixel values and pads images up to a multiple of `pad_size`."""

    model_input_names = ["pixel_values"]

    def __init__(self, do_rescale=True, rescale_factor=1 / 255, do_pad=True, pad_size=8, **kwargs):
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size
    def rescale(self, image, scale, data_format=None, **kwargs):
        # Multiply pixel values by `scale` (typically 1/255).
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def pad(self, image, size, data_format=None):
        # Symmetrically pad height and width up to the next multiple of `size`.
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)
    def preprocess(
        self,
        images,
        do_rescale=None,
        rescale_factor=None,
        do_pad=None,
        pad_size=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
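# Minimal usage sketch (editor's addition; the array shape below is illustrative
# and not part of the original module):
#
#     import numpy as np
#     processor = Swin2SRImageProcessor(pad_size=8)
#     batch = processor.preprocess(np.zeros((18, 20, 3), dtype=np.uint8), return_tensors="np")
#     # Height and width are padded up to the next multiple of 8: (1, 3, 24, 24).
#     print(batch["pixel_values"].shape)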
| 594 | import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1_000):
    """Miller-Rabin probabilistic primality test using `prec` random rounds."""
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d an int for bin_exp_mod
        exp += 1

    # n - 1 = d * (2**exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
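# Worked example (editor's addition): each round of Miller-Rabin has error
# probability at most 1/4, so with the default 1000 rounds the test is reliable
# in practice, e.g.:
#     is_prime_big(97)  -> True
#     is_prime_big(91)  -> False (91 = 7 * 13)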
if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
print("Here's the list of primes:")
print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 594 | 1 |
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BenchmarkArguments:
    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )

    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )

    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )

    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )
    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']`."
            )
        return self.models

    @property
    def do_multi_processing(self):
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
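# Minimal usage sketch (editor's addition; the model name is only an example):
#
#     args = BenchmarkArguments(models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128])
#     print(args.model_names)  # ["bert-base-uncased"]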
| 611 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
    # See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class MCTCTConfig(PretrainedConfig):
    model_type = "mctct"

    def __init__(
        self,
        vocab_size=8065,
        hidden_size=1536,
        num_hidden_layers=36,
        intermediate_size=6144,
        num_attention_heads=4,
        attention_head_dim=384,
        max_position_embeddings=920,
        layer_norm_eps=1e-5,
        layerdrop=0.3,
        hidden_act="relu",
        initializer_range=0.02,
        hidden_dropout_prob=0.3,
        attention_probs_dropout_prob=0.3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        conv_glu_dim=1,
        conv_dropout=0.3,
        num_conv_layers=1,
        conv_kernel=(7,),
        conv_stride=(3,),
        input_feat_per_channel=80,
        input_channels=1,
        conv_channels=None,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)

        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )
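# Minimal usage sketch (editor's addition): the per-layer convolution settings
# must agree in length with `num_conv_layers`, otherwise __init__ raises the
# ValueError above.
#
#     config = MCTCTConfig(num_conv_layers=1, conv_kernel=(7,), conv_stride=(3,))
#     config_json = config.to_json_string()  # works because conv_kernel/conv_stride are plain lists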
| 3 | 0 |
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFRegNetModel(config=config)
        result = model(pixel_values, training=False)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = TFRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object, dict_object)),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}"
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.4180, -1.5051, -3.4836])

        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
| 527 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class lowerCamelCase_ ( lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase = 42
__UpperCAmelCase = 42
__UpperCAmelCase = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 527 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __init__( self : Any , a_ : List[str] , a_ : Dict=7 , a_ : List[str]=3 , a_ : Union[str, Any]=18 , a_ : str=30 , a_ : Tuple=400 , a_ : List[Any]=True , a_ : Tuple=None , a_ : Dict=True , a_ : Union[str, Any]=False , a_ : str=True , a_ : int=True , a_ : Optional[int]=[0.5, 0.5, 0.5] , a_ : List[str]=[0.5, 0.5, 0.5] , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = num_channels
__snake_case = image_size
__snake_case = min_resolution
__snake_case = max_resolution
__snake_case = do_resize
__snake_case = size if size is not None else {'''height''': 18, '''width''': 20}
__snake_case = do_thumbnail
__snake_case = do_align_axis
__snake_case = do_pad
__snake_case = do_normalize
__snake_case = image_mean
__snake_case = image_std
def A ( self : Union[str, Any] ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = DonutImageProcessor if is_vision_available() else None
def A ( self : Optional[int] ):
"""simple docstring"""
__snake_case = DonutImageProcessingTester(self )
@property
def A ( self : Union[str, Any] ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def A ( self : List[str] ):
"""simple docstring"""
__snake_case = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , "do_resize" ) )
self.assertTrue(hasattr(A_ , "size" ) )
self.assertTrue(hasattr(A_ , "do_thumbnail" ) )
self.assertTrue(hasattr(A_ , "do_align_long_axis" ) )
self.assertTrue(hasattr(A_ , "do_pad" ) )
self.assertTrue(hasattr(A_ , "do_normalize" ) )
self.assertTrue(hasattr(A_ , "image_mean" ) )
self.assertTrue(hasattr(A_ , "image_std" ) )
def A ( self : List[str] ):
"""simple docstring"""
__snake_case = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 20} )
__snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
# Previous config had dimensions in (width, height) order
__snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {"height": 84, "width": 42} )
def A ( self : List[str] ):
"""simple docstring"""
pass
@is_flaky()
def A ( self : int ):
"""simple docstring"""
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
__snake_case = image_processing(A_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
@is_flaky()
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
__snake_case = image_processing(A_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
@is_flaky()
def A ( self : Optional[int] ):
"""simple docstring"""
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
__snake_case = image_processing(A_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
| 69 |
"""simple docstring"""
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter):
    """
    Returns a Counter mapping each perimeter of a right angled triangle to the
    number of distinct pythagorean triplets with that perimeter.
    """
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(max_perimeter=1_000):
    """Returns the perimeter with the maximum number of solutions."""
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]
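# Worked example (editor's addition): the problem statement notes that p = 120
# admits exactly three solutions, (20, 48, 52), (24, 45, 51) and (30, 40, 50),
# so solution(120) returns 120.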
if __name__ == "__main__":
print(f"""Perimeter {solution()} has maximum solutions""")
| 353 | 0 |
def nand_gate(input_1: int, input_2: int) -> int:
    """Output 0 only when both inputs are 1; otherwise output 1."""
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    """Checks nand_gate against the full truth table."""
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
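# Aside (editor's addition): NAND is functionally complete, so any other gate
# can be built from it; for instance NOT x == nand_gate(x, x) and
# AND(x, y) == nand_gate(nand_gate(x, y), nand_gate(x, y)).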
| 582 |
def nand_gate(input_1: int, input_2: int) -> int:
    """Output 0 only when both inputs are 1; otherwise output 1."""
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    """Checks nand_gate against the full truth table."""
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 582 | 1 |
def solution(n: int = 100):
    """Returns the number of distinct terms of a**b for 2 <= a, b <= n."""
    collect_powers = set()

    n = n + 1  # maximum limit

    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
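# Worked example (editor's addition): for n = 5 the terms a**b with
# 2 <= a, b <= 5 yield 15 distinct values, since the only collision is
# 2**4 == 4**2 == 16.
# >>> solution(5)
# 15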
if __name__ == "__main__":
print("Number of terms ", solution(int(str(input()).strip())))
| 619 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)

            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )
@require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )

        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )

        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        pass
| 619 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-sat-base-100h-libri-ft": (
        "https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
    ),
    # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
    model_type = "unispeech-sat"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        num_clusters=504,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim
    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
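# Worked example (editor's addition): with the default conv_stride of
# (5, 2, 2, 2, 2, 2, 2) the feature encoder downsamples raw audio by
# 5 * 2**6 = 320, which is exactly what `inputs_to_logits_ratio` reports:
#
#     config = UniSpeechSatConfig()
#     assert config.inputs_to_logits_ratio == 320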
| 160 |
def gcd(a, b):
    """Greatest common divisor via the Euclidean algorithm."""
    while a != 0:
        a, b = b % a, a
    return b


def mod_inverse(a, m):
    """Returns x such that (a * x) % m == 1, if it exists."""
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
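# Minimal usage sketch (editor's addition): 4 is the inverse of 3 modulo 11,
# since 3 * 4 = 12 == 1 (mod 11).
if __name__ == "__main__":
    assert mod_inverse(3, 11) == 4
    print(mod_inverse(3, 11))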
| 160 | 1 |
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    r"""A basic Transformer block: self-attention, optional cross-attention, and a feed-forward layer."""

    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        dropout=0.0,
        cross_attention_dim: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
        attention_bias: bool = False,
        only_cross_attention: bool = False,
        double_self_attention: bool = False,
        upcast_attention: bool = False,
        norm_elementwise_affine: bool = True,
        norm_type: str = "layer_norm",
        final_dropout: bool = False,
    ):
        super().__init__()
        self.only_cross_attention = only_cross_attention

        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"

        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
                f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
            )

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim if only_cross_attention else None,
            upcast_attention=upcast_attention,
        )

        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim if not double_self_attention else None,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
            )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None

        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)

        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0

    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int):
        # Sets chunk feed-forward
        self._chunk_size = chunk_size
        self._chunk_dim = dim

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        timestep: Optional[torch.LongTensor] = None,
        cross_attention_kwargs: Dict[str, Any] = None,
        class_labels: Optional[torch.LongTensor] = None,
    ):
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
            )
        else:
            norm_hidden_states = self.norm1(hidden_states)

        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}

        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states

        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )

            attn_output = self.attn2(
                norm_hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                **cross_attention_kwargs,
            )
            hidden_states = attn_output + hidden_states

        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)

        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]

        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`."
                )

            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)],
                dim=self._chunk_dim,
            )
        else:
            ff_output = self.ff(norm_hidden_states)

        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output

        hidden_states = ff_output + hidden_states

        return hidden_states
class FeedForward(nn.Module):
    r"""A feed-forward layer with a configurable activation function."""

    def __init__(
        self,
        dim: int,
        dim_out: Optional[int] = None,
        mult: int = 4,
        dropout: float = 0.0,
        activation_fn: str = "geglu",
        final_dropout: bool = False,
    ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim

        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)

        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states
class GELU(nn.Module):
    r"""GELU activation with an optional tanh approximation, preceded by a linear projection."""

    def __init__(self, dim_in: int, dim_out: int, approximate: str = "none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states
class GEGLU(nn.Module):
    r"""A gated linear unit variant using GELU, see https://arxiv.org/abs/2002.05202."""

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)
class ApproximateGELU(nn.Module):
    r"""Approximate GELU, x * sigmoid(1.702 * x), see https://arxiv.org/abs/1606.08415."""

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)
class AdaLayerNorm(nn.Module):
    r"""Norm layer modified to incorporate timestep embeddings."""

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x
class AdaLayerNormZero(nn.Module):
    r"""Norm layer with adaLN-zero modulation, conditioned on timestep and class labels."""

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm(nn.Module):
    r"""GroupNorm layer modified to incorporate an embedding as shift and scale."""

    def __init__(
        self, embedding_dim: int, out_dim: int, num_groups: int, act_fn: Optional[str] = None, eps: float = 1e-5
    ):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps

        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)

        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)

        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
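# Minimal usage sketch (editor's addition; the shapes below are illustrative):
#
#     block = BasicTransformerBlock(dim=64, num_attention_heads=4, attention_head_dim=16)
#     hidden_states = torch.randn(2, 77, 64)
#     out = block(hidden_states)  # same shape as the input: (2, 77, 64)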
| 51 |
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    # Compare two TensorProtos while ignoring their names.
    name_a = a.name
    name_b = b.name

    a.name = ""
    b.name = ""

    res = a == b

    a.name = name_a
    b.name = name_b

    return res
def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    """Removes duplicate initializers from an ONNX model to reduce its size on disk."""
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:  # FLOAT, 4 bytes
                    mem_size *= 4
                elif dtype == 6:  # INT32, 4 bytes
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:  # INT64 / DOUBLE, 8 bytes
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
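# Minimal usage sketch (editor's addition; the file path is hypothetical):
#
#     optimized_path = remove_dup_initializers("exported/encoder.onnx")
#     # writes exported/optimized_encoder.onnx and returns its path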
| 569 | 0 |
from copy import deepcopy
class FenwickTree:
    """A Fenwick tree (binary indexed tree) for point updates and prefix-sum queries."""

    def __init__(self, arr=None, size=None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr) -> None:
        """Initialize the tree from an existing array in O(n)."""
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        """Reconstruct and return the underlying array in O(n)."""
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index) -> int:
        return index - (index & (-index))

    def add(self, index, value) -> None:
        """Add `value` to the element at `index` in O(log n)."""
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index, value) -> None:
        """Set the element at `index` to `value` in O(log n)."""
        self.add(index, value - self.get(index))

    def prefix(self, right) -> int:
        """Return the sum of the elements in [0, right) in O(log n)."""
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left, right) -> int:
        """Return the sum of the elements in [left, right) in O(log n)."""
        return self.prefix(right) - self.prefix(left)

    def get(self, index) -> int:
        """Return the element at `index` in O(log n)."""
        return self.query(index, index + 1)

    def rank_query(self, value) -> int:
        """Return the largest index whose prefix sum does not exceed `value`."""
        value -= self.tree[0]
        if value < 0:
            return -1

        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2

        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
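
# Usage sketch (added for illustration; not part of the original module):
# point updates with prefix/range-sum queries, where query(l, r) sums the
# half-open index range [l, r).
def _fenwick_demo():
    ft = FenwickTree(arr=[1, 2, 3, 4, 5])
    assert ft.prefix(3) == 1 + 2 + 3
    assert ft.query(1, 4) == 2 + 3 + 4
    ft.add(2, 10)  # underlying array becomes [1, 2, 13, 4, 5]
    assert ft.query(1, 4) == 2 + 13 + 4
    assert ft.get_array() == [1, 2, 13, 4, 5]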
| 246 | from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
lowerCamelCase_ : Optional[int] = """CompVis/stable-diffusion-v1-1"""
lowerCamelCase_ : Any = """CompVis/stable-diffusion-v1-2"""
lowerCamelCase_ : int = """CompVis/stable-diffusion-v1-3"""
lowerCamelCase_ : Any = """CompVis/stable-diffusion-v1-4"""
class StableDiffusionComparisonPipeline(DiffusionPipeline):
    r"""
    Pipeline that runs the same prompt through Stable Diffusion checkpoints v1.1 to v1.4,
    so their outputs can be compared side by side.
    """

    def __init__(self, vae, text_encoder, tokenizer, unet, scheduler, safety_checker, feature_extractor, requires_safety_checker: bool = True):
        super().__init__()

        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, requires_safety_checker=requires_safety_checker,
        )

        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)

    @property
    def layers(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def text2img_sd1_1(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_2(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_3(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_4(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def __call__(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Check that height and width are divisible by 8
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
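
# Hedged usage sketch (added; the `custom_pipeline` id below is an assumption --
# community pipelines are normally loaded by the name of their file in the
# diffusers repository):
if __name__ == "__main__":
    pipe = DiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
    )
    pipe.enable_attention_slicing()
    output = pipe("an astronaut riding a horse on mars", num_inference_steps=25)
    # output.images holds one image per checkpoint: v1.1, v1.2, v1.3 and v1.4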
| 246 | 1 |
import unittest

from knapsack import greedy_knapsack as kp


class TestClass(unittest.TestCase):
    """Test cases for the greedy (fractional) knapsack."""

    def test_sorted(self):
        """calc_profit takes (profit, weight, max_weight) and returns the maximum profit."""
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        """A negative max_weight raises ValueError (inputs chosen to trigger the check)."""
        self.assertRaisesRegex(
            ValueError, "max_weight must greater than zero.", kp.calc_profit, [10, 20], [2, 4], -15
        )

    def test_negative_weight_value(self):
        """A negative weight value raises ValueError."""
        self.assertRaisesRegex(
            ValueError, "Weight can not be negative.", kp.calc_profit, [10, 20], [2, -4], 100
        )

    def test_negative_profit_value(self):
        """A negative profit value raises ValueError."""
        self.assertRaisesRegex(
            ValueError, "Profit can not be negative.", kp.calc_profit, [-10, 20], [2, 4], 100
        )

    def test_null_max_weight(self):
        """A zero max_weight raises ValueError."""
        self.assertRaisesRegex(
            ValueError, "max_weight must greater than zero.", kp.calc_profit, [10, 20], [2, 4], 0
        )

    def test_unequal_list_length(self):
        """Profit and weight lists of different lengths raise IndexError."""
        self.assertRaisesRegex(
            IndexError, "The length of profit and weight must be same.", kp.calc_profit, [10, 20, 30], [2, 4], 100
        )


if __name__ == "__main__":
    unittest.main()
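
# For context, a minimal sketch (an assumption -- not the actual module under
# test) of the greedy fractional-knapsack routine these tests exercise: items
# are taken in descending profit/weight ratio, splitting the last item if it
# does not fit whole.
def _calc_profit_sketch(profit, weight, max_weight):
    if len(profit) != len(weight):
        raise IndexError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # take items greedily by profit density
    items = sorted(zip(profit, weight), key=lambda pw: pw[0] / pw[1], reverse=True)
    total, capacity = 0.0, max_weight
    for p, w in items:
        if w <= capacity:
            total += p
            capacity -= w
        else:
            total += p * capacity / w  # take the fitting fraction of the item
            break
    return total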
| 28 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64), layers_per_block=2, sample_size=32, in_channels=7, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=8, use_linear_projection=True, only_cross_attention=(True, True, False), num_class_embeds=100,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        return CLIPTextModel(config)
    def test_stable_diffusion_upscale(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np",
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_upscale_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        output = sd_pipe(
            2 * [prompt], image=2 * [low_res_image], guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_images_per_prompt=2, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_upscale_fp16(self):
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_inference_steps=2, output_type="np",
        ).images

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(prompt=prompt, image=image, generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3

    def test_stable_diffusion_upscale_pipeline_fp16(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(prompt=prompt, image=image, generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=5, output_type="np")

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
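
# Illustrative helper (added; not part of the test suite): the x4 upscaler
# geometry the shape asserts above rely on -- each spatial side of the low-res
# input grows by a factor of 4.
def _expected_upscale_shape(low_res_side, batch_size=1):
    """
    >>> _expected_upscale_shape(64)
    (1, 256, 256, 3)
    >>> _expected_upscale_shape(128)
    (1, 512, 512, 3)
    """
    side = low_res_side * 4
    return (batch_size, side, side, 3)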
| 257 | 0 |
from sympy import diff, lambdify, symbols
from sympy.functions import *  # noqa: F403


def newton_raphson(function: str, starting_point, variable: str = "x", precision: float = 10**-10, multiplicity: int = 1):
    """Find a root of `function` (given as a string) starting from `starting_point`
    using the Newton-Raphson method, optionally accelerated for a root of known
    `multiplicity`."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(prev_guess)
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")

    # Find root of polynomial
    # Find fourth root of 5
    print(f"The root of x**4 - 5 = 0 is {newton_raphson('x**4 - 5', 0.4 + 5j)}")

    # Find value of e
    print(
        "The root of log(y) - 1 = 0 is ",
        f"{newton_raphson('log(y) - 1', 2, variable='y')}",
    )

    # Exponential roots
    print(
        "The root of exp(x) - 1 = 0 is",
        f"{newton_raphson('exp(x) - 1', 10, precision=0.005)}",
    )

    # Find root of cos(x); start away from 0, where the derivative -sin(x) vanishes
    print(f"The root of cos(x) = 0 is {newton_raphson('cos(x)', 1.5)}")
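
# Worked note (illustrative addition): the loop above implements the modified
# Newton iteration x_{n+1} = x_n - multiplicity * f(x_n) / f'(x_n). For a
# double root such as f(x) = (x - 2)**2, plain Newton (multiplicity=1) still
# converges, but only linearly: each step roughly halves the remaining error.
if __name__ == "__main__":
    print(
        "The root of (x - 2)**2 = 0 with multiplicity 1 is",
        f"{newton_raphson('(x - 2)**2', 3.0, precision=1e-6)}",
    )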
| 707 |
import string

import numpy


def greatest_common_divisor(a: int, b: int) -> int:
    """Euclid's algorithm."""
    return b if a == 0 else greatest_common_divisor(b % a, a)


class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key):
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)

        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        # keep only alphanumerics and pad with the last character to a multiple of break_key
        chars = [char for char in text.upper() if char in self.key_string]

        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)

        return "".join(chars)

    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = "".join(self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch

        return encrypted

    def make_decrypt_key(self):
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break

        inv_key = det_inv * numpy.linalg.det(self.encrypt_key) * numpy.linalg.inv(self.encrypt_key)

        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch

        return decrypted


def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
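
# Usage sketch (added for illustration): a 2x2 key whose determinant (7) is
# coprime with 36, so the cipher is invertible; decrypting the ciphertext
# recovers the processed (upper-cased, padded) plaintext.
def _hill_cipher_demo():
    hc = HillCipher(numpy.array([[2, 5], [1, 6]]))
    ciphertext = hc.encrypt("testing hill cipher")
    assert hc.decrypt(ciphertext) == hc.process_text("testing hill cipher")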
| 209 | 0 |
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], do_convert_rgb=True):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Prepare a list of PIL images, or numpy arrays if numpify=True, or PyTorch tensors if torchify=True."""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass

    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
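
# Hedged usage sketch (added; the kwargs mirror the test configuration above,
# not necessarily the processor's documented defaults):
def _chinese_clip_processor_demo():
    processor = ChineseCLIPImageProcessor(
        do_resize=True, size={"height": 224, "width": 224}, do_center_crop=True, crop_size={"height": 224, "width": 224}
    )
    dummy = Image.fromarray(np.random.randint(255, size=(32, 32, 3), dtype=np.uint8))
    batch = processor(images=[dummy], return_tensors="pt")
    assert batch.pixel_values.shape == (1, 3, 224, 224)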
| 13 |
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch

import multiprocess
import numpy as np
import pytest

from datasets.utils.py_utils import (
    NestedDataStructure,
    asdict,
    iflatmap_unordered,
    map_nested,
    temp_seed,
    temporary_assignment,
    zip_dict,
)

from .utils import require_tf, require_torch


def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1
@dataclass
class A:
    x: int
    y: str


class PyUtilsTest(TestCase):
    def test_map_nested(self):
        s1 = {}
        s2 = []
        s3 = 1
        s4 = [1, 2]
        s5 = {"a": 1, "b": 2}
        s6 = {"a": [1, 2], "b": [3, 4]}
        s7 = {"a": {"1": 1}, "b": 2}
        s8 = {"a": 1, "b": 2, "c": 3, "d": 4}
        expected_map_nested_s1 = {}
        expected_map_nested_s2 = []
        expected_map_nested_s3 = 2
        expected_map_nested_s4 = [2, 3]
        expected_map_nested_s5 = {"a": 2, "b": 3}
        expected_map_nested_s6 = {"a": [2, 3], "b": [4, 5]}
        expected_map_nested_s7 = {"a": {"1": 2}, "b": 3}
        expected_map_nested_s8 = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(add_one, s1), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8), expected_map_nested_s8)

        num_proc = 2
        self.assertEqual(map_nested(add_one, s1, num_proc=num_proc), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2, num_proc=num_proc), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3, num_proc=num_proc), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4, num_proc=num_proc), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5, num_proc=num_proc), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6, num_proc=num_proc), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7, num_proc=num_proc), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8, num_proc=num_proc), expected_map_nested_s8)

        sn = {"a": np.eye(2), "b": np.zeros(3), "c": np.ones(2)}
        expected_map_nested_sn_sum = {"a": 2, "b": 0, "c": 2}
        expected_map_nested_sn_np = {
            "a": np.eye(2).astype(int),
            "b": np.zeros(3).astype(int),
            "c": np.ones(2).astype(int),
        }
        self.assertEqual(map_nested(np_sum, sn, map_numpy=False), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(np_sum, sn, map_numpy=True).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn_np.items()},
        )
        self.assertEqual(map_nested(np_sum, sn, map_numpy=False, num_proc=num_proc), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(np_sum, sn, map_numpy=True, num_proc=num_proc).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn_np.items()},
        )
        with self.assertRaises(AttributeError):  # can't pickle a local lambda
            map_nested(lambda x: x + 1, sn, num_proc=num_proc)

    def test_zip_dict(self):
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)

    def test_temporary_assignment(self):
        class Foo:
            my_attr = "bar"

        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")
@pytest.mark.parametrize(
    "iterable_length, num_proc, expected_num_proc",
    [
        (1, None, 1),
        (1, 1, 1),
        (2, None, 1),
        (2, 1, 1),
        (2, 2, 1),
        (2, 3, 1),
        (3, 2, 1),
        (16, 16, 16),
        (16, 17, 16),
        (17, 16, 16),
    ],
)
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest(TestCase):
    @require_tf
    def test_temp_seed_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    @require_torch
    def test_temp_seed_torch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    def test_temp_seed_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize("input_data" , [{}] )
def _UpperCAmelCase (UpperCamelCase__ : Optional[Any] ):
_A : int = NestedDataStructure(UpperCamelCase__ ).data
assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output" , [
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
] , )
def _UpperCAmelCase (UpperCamelCase__ : Any , UpperCamelCase__ : Dict ):
_A : List[Any] = NestedDataStructure(UpperCamelCase__ ).flatten()
assert output == expected_output
def _UpperCAmelCase ():
_A : int = A(x=1 , y="foobar" )
_A : Any = {"x": 1, "y": "foobar"}
assert asdict(UpperCamelCase__ ) == expected_output
_A : int = {"a": {"b": A(x=10 , y="foo" )}, "c": [A(x=20 , y="bar" )]}
_A : int = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
assert asdict(UpperCamelCase__ ) == expected_output
with pytest.raises(UpperCamelCase__ ):
asdict([1, A(x=10 , y="foo" )] )
def _UpperCAmelCase (UpperCamelCase__ : str ):
return text.split()
def _UpperCAmelCase (UpperCamelCase__ : int ):
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def _UpperCAmelCase ():
with Pool(2 ) as pool:
_A : str = list(iflatmap_unordered(UpperCamelCase__ , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(UpperCamelCase__ ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
_A : Dict = list(iflatmap_unordered(UpperCamelCase__ , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(UpperCamelCase__ ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
_A : Dict = []
for yield_time, content in iflatmap_unordered(
UpperCamelCase__ , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{"content": "a"}, {"content": "b"}] ):
assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
out.append(UpperCamelCase__ )
assert out.count("a" ) == 2
assert out.count("b" ) == 2
assert len(UpperCamelCase__ ) == 4
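
# Quick illustration (added; not part of the original test file): map_nested
# applies a function across arbitrarily nested lists and dicts while keeping
# the structure intact.
def _map_nested_demo():
    assert map_nested(add_one, {"a": [1, 2], "b": {"c": 3}}) == {"a": [2, 3], "b": {"c": 4}}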
| 503 | 0 |
"""simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
a__ : List[Any] = logging.getLogger(__name__)
def load_and_quantize_model(
    model,
    bnb_quantization_config,
    weights_location=None,
    device_map=None,
    no_split_module_classes=None,
    max_memory=None,
    offload_folder=None,
    offload_state_dict=False,
):
    """Quantize `model` with bitsandbytes, load its weights and dispatch it on the available devices."""
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            "make sure you have the latest version of `bitsandbytes` installed."
        )

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)

    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager."
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization."
            "We move the model to cuda."
        )
        return model
    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
        )
    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True

        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])

        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized. Setting device_map to `{'':torch.cuda.current_device()}`.")

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )

        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check if we don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        "\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n "
                    )
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    """Replace every `nn.Linear` (outside `modules_to_not_convert`) with a bitsandbytes quantized linear layer."""
    if modules_to_not_convert is None:
        modules_to_not_convert = []

    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model


def _replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace the `nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def get_keys_to_not_convert( model ):
    """simple docstring"""
    # Create a copy of the model
    with init_empty_weights():
        tied_model = deepcopy(model )  # this has 0 cost since it is done inside `init_empty_weights` context manager
    tied_params = find_tied_parameters(tied_model )
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict ):
        tied_keys = sum(list(tied_params.values() ), [] ) + list(tied_params.keys() )
    else:
        tied_keys = sum(tied_params, [] )
    has_tied_params = len(tied_keys ) > 0
    # Check if it is a base model
    is_base_model = False
    if hasattr(model, 'base_model_prefix' ):
        is_base_model = not hasattr(model, model.base_model_prefix )
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children() )
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module ) - set(tied_keys )
    list_untouched = list(set(tied_keys ) ) + list(intersection )
    # remove ".weight" from the keys
    names_to_remove = ['.weight', '.bias']
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, '' )
        filtered_module_names.append(name )
    return filtered_module_names
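# Added note: for a checkpoint whose LM head is tied to the input embeddings
# (a typical causal LM, say), the helper above would usually return something
# like ["lm_head"], so that module is kept un-quantized. The exact names
# depend on the architecture, so treat this as an illustration rather than a
# guarantee.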
def has_4bit_bnb_layers( model ):
    """simple docstring"""
    # Check whether any `bnb.nn.Linear4bit` layers are present inside the model
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit ):
            return True
    return False
def get_parameter_device( parameter ):
    """simple docstring"""
    return next(parameter.parameters() ).device
def quantize_and_offload_8bit( model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics ):
    """simple docstring"""
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param )
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split('.' )
            for split in splits[:-1]:
                new_module = getattr(module, split )
                if new_module is None:
                    raise ValueError(F'''{module} has no attribute {split}.''' )
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index )
        if hasattr(module._parameters[tensor_name], 'SCB' ):
            offload_weight(
                module._parameters[tensor_name].SCB, param_name.replace('weight', 'SCB' ), offload_folder, index=offload_index, )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index )
        offload_weight(fp16_statistics, param_name.replace('weight', 'SCB' ), offload_folder, index=offload_index )
    set_module_tensor_to_device(model, param_name, 'meta', dtype=new_dtype, value=torch.empty(*param.size() ) )
| 309 |
"""simple docstring"""
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    ControlNetModel,
    DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests( PipelineLatentTesterMixin ,PipelineKarrasSchedulerTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} )
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components( self ):
"""simple docstring"""
torch.manual_seed(0 )
        unet = UNet2DConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , )
torch.manual_seed(0 )
        controlnet = ControlNetModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=3_2 , conditioning_embedding_out_channels=(1_6, 3_2) , )
torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=False , set_alpha_to_one=False , )
torch.manual_seed(0 )
        vae = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        components = {
'unet': unet,
'controlnet': controlnet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
    def get_dummy_inputs( self , device , seed=0 ):
        """simple docstring"""
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 3_2 * controlnet_embedder_scale_factor, 3_2 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , )
        image = floats_tensor(control_image.shape , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert('RGB' ).resize((6_4, 6_4) )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
            'image': image,
            'control_image': control_image,
        }
        return inputs
def _lowerCamelCase ( self ):
"""simple docstring"""
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _lowerCamelCase ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def _lowerCamelCase ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
class StableDiffusionMultiControlNetPipelineFastTests( PipelineTesterMixin ,PipelineLatentTesterMixin ,unittest.TestCase ):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([] )  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def get_dummy_components( self ):
"""simple docstring"""
torch.manual_seed(0 )
        unet = UNet2DConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , )
torch.manual_seed(0 )
        def init_weights(m ):
            if isinstance(m , torch.nn.Conv2d ):
                torch.nn.init.normal_(m.weight )
                m.bias.data.fill_(1.0 )
        controlnet1 = ControlNetModel(
            block_out_channels=(3_2, 6_4) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=3_2 , conditioning_embedding_out_channels=(1_6, 3_2) , )
        controlnet1.controlnet_down_blocks.apply(init_weights )
        torch.manual_seed(0 )
        controlnet2 = ControlNetModel(
            block_out_channels=(3_2, 6_4) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=3_2 , conditioning_embedding_out_channels=(1_6, 3_2) , )
        controlnet2.controlnet_down_blocks.apply(init_weights )
torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=False , set_alpha_to_one=False , )
torch.manual_seed(0 )
        vae = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        controlnet = MultiControlNetModel([controlnet1, controlnet2] )
        components = {
'unet': unet,
'controlnet': controlnet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
    def get_dummy_inputs( self , device , seed=0 ):
        """simple docstring"""
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 3_2 * controlnet_embedder_scale_factor, 3_2 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , ),
            randn_tensor(
                (1, 3, 3_2 * controlnet_embedder_scale_factor, 3_2 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , ),
        ]
        image = floats_tensor(control_image[0].shape , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert('RGB' ).resize((6_4, 6_4) )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
            'image': image,
            'control_image': control_image,
        }
        return inputs
    def test_control_guidance_switch( self ):
        """simple docstring"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        scale = 10.0
        steps = 4
        inputs = self.get_dummy_inputs(torch_device )
        inputs['num_inference_steps'] = steps
        inputs['controlnet_conditioning_scale'] = scale
        output_1 = pipe(**inputs )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs['num_inference_steps'] = steps
        inputs['controlnet_conditioning_scale'] = scale
        output_2 = pipe(**inputs , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs['num_inference_steps'] = steps
        inputs['controlnet_conditioning_scale'] = scale
        output_3 = pipe(**inputs , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs['num_inference_steps'] = steps
        inputs['controlnet_conditioning_scale'] = scale
        output_4 = pipe(**inputs , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2 ) ) > 1e-3
        assert np.sum(np.abs(output_1 - output_3 ) ) > 1e-3
        assert np.sum(np.abs(output_1 - output_4 ) ) > 1e-3
def _lowerCamelCase ( self ):
"""simple docstring"""
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _lowerCamelCase ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def _lowerCamelCase ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
    def test_save_pretrained_raise_not_implemented_exception( self ):
        """simple docstring"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir )
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests( unittest.TestCase ):
    def tearDown( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_canny( self ):
        """simple docstring"""
        controlnet = ControlNetModel.from_pretrained('lllyasviel/sd-controlnet-canny' )
        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5' , safety_checker=None , controlnet=controlnet )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        prompt = 'evil space-punk bird'
        control_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' ).resize((5_1_2, 5_1_2) )
        image = load_image(
            'https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png' ).resize((5_1_2, 5_1_2) )
        output = pipe(
            prompt , image , control_image=control_image , generator=generator , output_type='np' , num_inference_steps=5_0 , strength=0.6 , )
        image = output.images[0]
        assert image.shape == (5_1_2, 5_1_2, 3)
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy' )
        assert np.abs(expected_image - image ).max() < 9e-2
| 309 | 1 |
def hamming_distance(string1: str ,string2: str ) -> int:
    '''
    Return the Hamming distance between two equal-length strings.

    >>> hamming_distance("python", "pythin")
    1
    >>> hamming_distance("karolin", "kathrin")
    3
    '''
    if len(string1 ) != len(string2 ):
        raise ValueError('''String lengths must match!''' )
    count = 0
    for char_1, char_2 in zip(string1 ,string2 ):
        if char_1 != char_2:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 481 |
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_a = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
_a = "\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"
_a = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"
def lowerCAmelCase__(__snake_case ) -> int:
'''simple docstring'''
def remove_articles(__snake_case ):
lowerCamelCase__ = re.compile(R'''\b(a|an|the)\b''' ,re.UNICODE )
return re.sub(__snake_case ,''' ''' ,__snake_case )
def white_space_fix(__snake_case ):
return " ".join(text.split() )
def remove_punc(__snake_case ):
lowerCamelCase__ = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__snake_case ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__snake_case ) ) ) )
def lowerCAmelCase__(__snake_case ,__snake_case ) -> Optional[int]:
'''simple docstring'''
return int(normalize_answer(__snake_case ) == normalize_answer(__snake_case ) )
def lowerCAmelCase__(__snake_case ,__snake_case ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = [any(compute_exact(__snake_case ,__snake_case ) for ref in refs ) for pred, refs in zip(__snake_case ,__snake_case )]
return (sum(__snake_case ) / len(__snake_case )) * 100
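# Added illustrative check: after normalization ("the" is dropped as an
# article and punctuation is stripped), one of the two predictions below
# matches its reference, so the exact-match score is 50.0.
#
# compute_em(["The cat sat.", "dogs"], [["the cat sat"], ["a dog"]])  # -> 50.0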
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ) -> int:
'''simple docstring'''
lowerCamelCase__ = [rgram for rgrams in rgramslist for rgram in rgrams]
lowerCamelCase__ = Counter(__snake_case )
lowerCamelCase__ = Counter(__snake_case )
lowerCamelCase__ = Counter()
for sgram, scount in sgramcounter.items():
lowerCamelCase__ = scount * numref
lowerCamelCase__ = Counter(__snake_case )
lowerCamelCase__ = Counter()
for cgram, ccount in cgramcounter.items():
lowerCamelCase__ = ccount * numref
# KEEP
lowerCamelCase__ = sgramcounter_rep & cgramcounter_rep
lowerCamelCase__ = keepgramcounter_rep & rgramcounter
lowerCamelCase__ = sgramcounter_rep & rgramcounter
lowerCamelCase__ = 0
lowerCamelCase__ = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
lowerCamelCase__ = 1
lowerCamelCase__ = 1
if len(__snake_case ) > 0:
lowerCamelCase__ = keeptmpscorea / len(__snake_case )
if len(__snake_case ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
lowerCamelCase__ = keeptmpscorea / sum(keepgramcounterall_rep.values() )
lowerCamelCase__ = 0
if keepscore_precision > 0 or keepscore_recall > 0:
lowerCamelCase__ = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
lowerCamelCase__ = sgramcounter_rep - cgramcounter_rep
lowerCamelCase__ = delgramcounter_rep - rgramcounter
lowerCamelCase__ = sgramcounter_rep - rgramcounter
lowerCamelCase__ = 0
lowerCamelCase__ = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
lowerCamelCase__ = 1
if len(__snake_case ) > 0:
lowerCamelCase__ = deltmpscorea / len(__snake_case )
# ADDITION
lowerCamelCase__ = set(__snake_case ) - set(__snake_case )
lowerCamelCase__ = set(__snake_case ) & set(__snake_case )
lowerCamelCase__ = set(__snake_case ) - set(__snake_case )
lowerCamelCase__ = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
lowerCamelCase__ = 1
lowerCamelCase__ = 1
if len(__snake_case ) > 0:
lowerCamelCase__ = addtmpscore / len(__snake_case )
if len(__snake_case ) > 0:
lowerCamelCase__ = addtmpscore / len(__snake_case )
lowerCamelCase__ = 0
if addscore_precision > 0 or addscore_recall > 0:
lowerCamelCase__ = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ) -> List[str]:
'''simple docstring'''
lowerCamelCase__ = len(__snake_case )
lowerCamelCase__ = ssent.split(''' ''' )
lowerCamelCase__ = csent.split(''' ''' )
lowerCamelCase__ = []
lowerCamelCase__ = []
lowerCamelCase__ = []
lowerCamelCase__ = []
lowerCamelCase__ = []
lowerCamelCase__ = []
lowerCamelCase__ = []
lowerCamelCase__ = []
lowerCamelCase__ = []
lowerCamelCase__ = []
for rsent in rsents:
lowerCamelCase__ = rsent.split(''' ''' )
lowerCamelCase__ = []
lowerCamelCase__ = []
lowerCamelCase__ = []
ragramslist.append(__snake_case )
for i in range(0 ,len(__snake_case ) - 1 ):
if i < len(__snake_case ) - 1:
lowerCamelCase__ = ragrams[i] + ''' ''' + ragrams[i + 1]
ragrams.append(__snake_case )
if i < len(__snake_case ) - 2:
lowerCamelCase__ = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2]
ragrams.append(__snake_case )
if i < len(__snake_case ) - 3:
lowerCamelCase__ = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] + ''' ''' + ragrams[i + 3]
ragrams.append(__snake_case )
ragramslist.append(__snake_case )
ragramslist.append(__snake_case )
ragramslist.append(__snake_case )
for i in range(0 ,len(__snake_case ) - 1 ):
if i < len(__snake_case ) - 1:
lowerCamelCase__ = sagrams[i] + ''' ''' + sagrams[i + 1]
sagrams.append(__snake_case )
if i < len(__snake_case ) - 2:
lowerCamelCase__ = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2]
sagrams.append(__snake_case )
if i < len(__snake_case ) - 3:
lowerCamelCase__ = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] + ''' ''' + sagrams[i + 3]
sagrams.append(__snake_case )
for i in range(0 ,len(__snake_case ) - 1 ):
if i < len(__snake_case ) - 1:
lowerCamelCase__ = cagrams[i] + ''' ''' + cagrams[i + 1]
cagrams.append(__snake_case )
if i < len(__snake_case ) - 2:
lowerCamelCase__ = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2]
cagrams.append(__snake_case )
if i < len(__snake_case ) - 3:
lowerCamelCase__ = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] + ''' ''' + cagrams[i + 3]
cagrams.append(__snake_case )
((lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__)) = SARIngram(__snake_case ,__snake_case ,__snake_case ,__snake_case )
((lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__)) = SARIngram(__snake_case ,__snake_case ,__snake_case ,__snake_case )
((lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__)) = SARIngram(__snake_case ,__snake_case ,__snake_case ,__snake_case )
((lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__)) = SARIngram(__snake_case ,__snake_case ,__snake_case ,__snake_case )
lowerCamelCase__ = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
lowerCamelCase__ = sum([delascore, delascore, delascore, delascore] ) / 4
lowerCamelCase__ = sum([addascore, addascore, addascore, addascore] ) / 4
lowerCamelCase__ = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
def normalize(sentence ,lowercase = True ,tokenizer = "13a" ,return_str = True ):
    '''Lowercase and tokenize a sentence, following the sacrebleu conventions.'''
    if lowercase:
        sentence = sentence.lower()
    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__ ).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer )()(sentence )
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence )
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence ,return_str=True ,escape=False )
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence ,return_str=True )
    else:
        normalized_sent = sentence
    if not return_str:
        normalized_sent = normalized_sent.split()
    return normalized_sent
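# Added illustrative check (exact output depends on the installed sacrebleu
# version): with the defaults, the sentence is lowercased and the "13a"
# tokenizer splits off the final period, e.g.
#
# normalize("About 95 species are currently accepted.")
# # -> "about 95 species are currently accepted ."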
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ) -> List[Any]:
'''simple docstring'''
if not (len(__snake_case ) == len(__snake_case ) == len(__snake_case )):
raise ValueError('''Sources length must match predictions and references lengths.''' )
lowerCamelCase__ = 0
for src, pred, refs in zip(__snake_case ,__snake_case ,__snake_case ):
sari_score += SARIsent(normalize(__snake_case ) ,normalize(__snake_case ) ,[normalize(__snake_case ) for sent in refs] )
lowerCamelCase__ = sari_score / len(__snake_case )
return 100 * sari_score
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case="exp" ,__snake_case=None ,__snake_case=False ,__snake_case=False ,__snake_case=False ,) -> int:
'''simple docstring'''
lowerCamelCase__ = len(references[0] )
if any(len(__snake_case ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
lowerCamelCase__ = [[refs[i] for refs in references] for i in range(__snake_case )]
lowerCamelCase__ = sacrebleu.corpus_bleu(
__snake_case ,__snake_case ,smooth_method=__snake_case ,smooth_value=__snake_case ,force=__snake_case ,lowercase=__snake_case ,use_effective_order=__snake_case ,)
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WikiSplit( datasets.Metric ):
    '''simple docstring'''
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=[
'''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
'''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
'''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
'''https://github.com/mjpost/sacreBLEU''',
] , reference_urls=[
'''https://www.aclweb.org/anthology/Q16-1029.pdf''',
'''https://github.com/mjpost/sacreBLEU''',
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
    def _compute( self , sources , predictions , references ):
        '''simple docstring'''
        result = {}
        result.update({'''sari''': compute_sari(sources=sources , predictions=predictions , references=references )} )
        result.update({'''sacrebleu''': compute_sacrebleu(predictions=predictions , references=references )} )
        result.update({'''exact''': compute_em(predictions=predictions , references=references )} )
        return result
| 481 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_conditional_detr": [
        "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ConditionalDetrConfig",
        "ConditionalDetrOnnxConfig",
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["ConditionalDetrFeatureExtractor"]
UpperCAmelCase__ = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_conditional_detr"] = [
        "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConditionalDetrForObjectDetection",
        "ConditionalDetrForSegmentation",
        "ConditionalDetrModel",
        "ConditionalDetrPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 362 |
import tensorflow as tf
from ...tf_utils import shape_list
class TFAdaptiveSoftmaxMask( tf.keras.layers.Layer ):
    """simple docstring"""
    def __init__( self , vocab_size , d_embed , d_proj , cutoffs , div_val=1 , keep_order=False , **kwargs ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs ) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order
        self.out_projs = []
        self.out_layers = []
def UpperCAmelCase_ ( self : Optional[int] , lowerCamelCase__ : Tuple ) -> Tuple:
"""simple docstring"""
if self.n_clusters > 0:
__lowercase = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer='''zeros''' , trainable=lowerCamelCase__ , name='''cluster_weight''' )
__lowercase = self.add_weight(
shape=(self.n_clusters,) , initializer='''zeros''' , trainable=lowerCamelCase__ , name='''cluster_bias''' )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
__lowercase = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer='''zeros''' , trainable=lowerCamelCase__ , name=f'out_projs_._{i}' , )
self.out_projs.append(lowerCamelCase__ )
else:
self.out_projs.append(lowerCamelCase__ )
__lowercase = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer='''zeros''' , trainable=lowerCamelCase__ , name=f'out_layers_._{i}_._weight' , )
__lowercase = self.add_weight(
shape=(self.vocab_size,) , initializer='''zeros''' , trainable=lowerCamelCase__ , name=f'out_layers_._{i}_._bias' , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
__lowercase , __lowercase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
__lowercase = self.d_embed // (self.div_val**i)
__lowercase = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer='''zeros''' , trainable=lowerCamelCase__ , name=f'out_projs_._{i}' )
self.out_projs.append(lowerCamelCase__ )
__lowercase = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer='''zeros''' , trainable=lowerCamelCase__ , name=f'out_layers_._{i}_._weight' , )
__lowercase = self.add_weight(
shape=(r_idx - l_idx,) , initializer='''zeros''' , trainable=lowerCamelCase__ , name=f'out_layers_._{i}_._bias' , )
self.out_layers.append((weight, bias) )
        super().build(input_shape )
    @staticmethod
    def _logit(x , W , b , proj=None ):
        """simple docstring"""
        y = x
        if proj is not None:
            y = tf.einsum('''ibd,ed->ibe''' , y , proj )
        return tf.einsum('''ibd,nd->ibn''' , y , W ) + b
    @staticmethod
    def _gather_logprob(logprob , target ):
        """simple docstring"""
        lp_size = shape_list(logprob )
        r = tf.range(lp_size[0] , dtype=target.dtype )
        idx = tf.stack([r, target] , 1 )
        return tf.gather_nd(logprob , idx )
    def call( self , hidden , target , return_mean=True , training=False ):
"""simple docstring"""
__lowercase = 0
if self.n_clusters == 0:
__lowercase = self._logit(lowerCamelCase__ , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
__lowercase = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=lowerCamelCase__ , logits=lowerCamelCase__ )
__lowercase = tf.nn.log_softmax(lowerCamelCase__ , axis=-1 )
else:
__lowercase = shape_list(lowerCamelCase__ )
__lowercase = []
__lowercase = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
__lowercase , __lowercase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
__lowercase = (target >= l_idx) & (target < r_idx)
__lowercase = tf.where(lowerCamelCase__ )
__lowercase = tf.boolean_mask(lowerCamelCase__ , lowerCamelCase__ ) - l_idx
if self.div_val == 1:
__lowercase = self.out_layers[0][0][l_idx:r_idx]
__lowercase = self.out_layers[0][1][l_idx:r_idx]
else:
__lowercase = self.out_layers[i][0]
__lowercase = self.out_layers[i][1]
if i == 0:
__lowercase = tf.concat([cur_W, self.cluster_weight] , 0 )
__lowercase = tf.concat([cur_b, self.cluster_bias] , 0 )
__lowercase = self._logit(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , self.out_projs[0] )
__lowercase = tf.nn.log_softmax(lowerCamelCase__ )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
__lowercase = tf.boolean_mask(lowerCamelCase__ , lowerCamelCase__ )
__lowercase = self._gather_logprob(lowerCamelCase__ , lowerCamelCase__ )
else:
__lowercase = self._logit(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , self.out_projs[i] )
__lowercase = tf.nn.log_softmax(lowerCamelCase__ )
__lowercase = self.cutoffs[0] + i - 1 # No probability for the head cluster
__lowercase = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(lowerCamelCase__ )
if target is not None:
__lowercase = tf.boolean_mask(lowerCamelCase__ , lowerCamelCase__ )
__lowercase = tf.boolean_mask(lowerCamelCase__ , lowerCamelCase__ )
__lowercase = self._gather_logprob(lowerCamelCase__ , lowerCamelCase__ )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(lowerCamelCase__ , -cur_logprob , shape_list(lowerCamelCase__ ) )
__lowercase = tf.concat(lowerCamelCase__ , axis=-1 )
if target is not None:
if return_mean:
__lowercase = tf.reduce_mean(lowerCamelCase__ )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(lowerCamelCase__ )
# Log the loss as a metric (we could log arbitrary metrics,
# including different metrics for training and inference.
self.add_metric(lowerCamelCase__ , name=self.name , aggregation='''mean''' if return_mean else '''''' )
return out
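# Added note: the layer above implements an adaptive softmax with masking
# (Grave et al., 2017, "Efficient softmax approximation for GPUs"). Frequent
# tokens live in a small head of size cutoffs[0] + n_clusters, while rarer
# tokens fall into tail clusters whose embedding width shrinks by a factor of
# div_val per cluster, so the expensive full-vocabulary softmax is avoided on
# most steps at a small cost in accuracy on rare words.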
| 362 | 1 |
"""simple docstring"""
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["""tests.fixtures.files""", """tests.fixtures.hub""", """tests.fixtures.fsspec"""]
def pytest_collection_modifyitems(config , items ):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["""integration""", """unit"""] ):
            continue
        item.add_marker(pytest.mark.unit )
def pytest_configure(config ):
    config.addinivalue_line("""markers""" , """torchaudio_latest: mark test to run with torchaudio>=0.12""" )
@pytest.fixture(autouse=True )
def set_test_cache_config(tmp_path_factory , monkeypatch ):
    # test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why a cache dir per test function does not work?
    test_hf_cache_home = tmp_path_factory.getbasetemp() / """cache"""
    test_hf_datasets_cache = test_hf_cache_home / """datasets"""
    test_hf_metrics_cache = test_hf_cache_home / """metrics"""
    test_hf_modules_cache = test_hf_cache_home / """modules"""
    monkeypatch.setattr("""datasets.config.HF_DATASETS_CACHE""" , str(test_hf_datasets_cache ) )
    monkeypatch.setattr("""datasets.config.HF_METRICS_CACHE""" , str(test_hf_metrics_cache ) )
    monkeypatch.setattr("""datasets.config.HF_MODULES_CACHE""" , str(test_hf_modules_cache ) )
    test_downloaded_datasets_path = test_hf_datasets_cache / """downloads"""
    monkeypatch.setattr("""datasets.config.DOWNLOADED_DATASETS_PATH""" , str(test_downloaded_datasets_path ) )
    test_extracted_datasets_path = test_hf_datasets_cache / """downloads""" / """extracted"""
    monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(test_extracted_datasets_path ) )
@pytest.fixture(autouse=True , scope="""session""" )
def disable_tqdm_output():
    datasets.disable_progress_bar()
@pytest.fixture(autouse=True )
def set_update_download_counts_to_false(monkeypatch ):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("""datasets.config.HF_UPDATE_DOWNLOAD_COUNTS""" , False )
@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch ):
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 supported
    monkeypatch.setattr("""sqlalchemy.util.deprecations.SILENCE_UBER_WARNING""" , True )
| 543 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)
MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""speechbrain/m-ctc-t-large""": """https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json""",
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class MCTCTConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "mctct"
    def __init__( self , vocab_size=8_065 , hidden_size=1_536 , num_hidden_layers=36 , intermediate_size=6_144 , num_attention_heads=4 , attention_head_dim=384 , max_position_embeddings=920 , layer_norm_eps=1e-5 , layerdrop=0.3 , hidden_act="relu" , initializer_range=0.02 , hidden_dropout_prob=0.3 , attention_probs_dropout_prob=0.3 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , conv_glu_dim=1 , conv_dropout=0.3 , num_conv_layers=1 , conv_kernel=(7,) , conv_stride=(3,) , input_feat_per_channel=80 , input_channels=1 , conv_channels=None , ctc_loss_reduction="sum" , ctc_zero_infinity=False , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel )
        self.conv_stride = list(conv_stride )
        if len(self.conv_kernel ) != self.num_conv_layers:
            raise ValueError(
                """Configuration for convolutional module is incorrect. """
                """It is required that `len(config.conv_kernel)` == `config.num_conv_layers` """
                f'but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, '
                f'`config.num_conv_layers = {self.num_conv_layers}`.' )
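# Added illustrative usage (hedged; values are simply the defaults declared in
# __init__ above, and the snippet is commented out so the module stays
# import-only):
#
# config = MCTCTConfig()
# assert config.hidden_size == 1_536 and config.num_hidden_layers == 36
# print(config.to_json_string())  # round-trips thanks to the list() casts above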
| 543 | 1 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True )
class ImageClassification(TaskTemplate ):
    task: str = field(default='image-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
    input_schema: ClassVar[Features] = Features({'image': Image()} )
    label_schema: ClassVar[Features] = Features({'labels': ClassLabel} )
    image_column: str = "image"
    label_column: str = "labels"
    def align_with_features( self , features ):
        """simple docstring"""
        if self.label_column not in features:
            raise ValueError(f"""Column {self.label_column} is not present in features.""" )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(f"""Column {self.label_column} is not a ClassLabel.""" )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema['labels'] = features[self.label_column]
        task_template.__dict__['label_schema'] = label_schema
        return task_template
    @property
    def column_mapping( self ) -> Dict[str, str]:
        """simple docstring"""
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
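# Added illustrative sketch (hedged; commented out to keep the module
# import-only): aligning the template above with a dataset's features.
#
# from datasets import ClassLabel, Features, Image
# features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
# task = ImageClassification()  # defaults: image_column="image", label_column="labels"
# task = task.align_with_features(features)  # label_schema now carries the two class names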
| 45 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""moussaKam/mbarthez""": 1_024,
"""moussaKam/barthez""": 1_024,
"""moussaKam/barthez-orangesum-title""": 1_024,
}
SPIECE_UNDERLINE = """▁"""
class BarthezTokenizerFast(PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = BarthezTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , **kwargs , ):
        """simple docstring"""
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        """simple docstring"""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1=None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def save_vocabulary( self , save_directory , filename_prefix=None ):
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
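# Added illustrative note: BARThez follows the CamemBERT special-token layout,
# so build_inputs_with_special_tokens above produces `<s> A </s>` for a single
# sequence and `<s> A </s></s> B </s>` for a pair. Assuming `<s>` maps to id 0
# and `</s>` to id 2 in the loaded vocabulary:
#   tokenizer.build_inputs_with_special_tokens([5, 6])    # -> [0, 5, 6, 2]
#   tokenizer.build_inputs_with_special_tokens([5], [6])  # -> [0, 5, 2, 2, 6, 2]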
| 45 | 1 |
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock(""".lock""") as lock:
nltk.download("""punkt""", quiet=True)
def add_newline_to_end_of_each_sentence(x ):
    """Split a string into sentences and re-join them with newlines."""
    x = re.sub('<n>' , '' , x )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x ) )
| 687 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/config.json',
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class XGLMConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = 'xglm'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'num_attention_heads': 'attention_heads',
        'hidden_size': 'd_model',
        'num_hidden_layers': 'num_layers',
    }
    def __init__( self , vocab_size=25_6008 , max_position_embeddings=2048 , d_model=1024 , ffn_dim=4096 , num_layers=24 , attention_heads=16 , activation_function="gelu" , dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , layerdrop=0.0 , init_std=0.02 , scale_embedding=True , use_cache=True , decoder_start_token_id=2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
| 293 | 0 |
def min_path_sum(grid ) -> int:
    '''
    Find the minimum cost of a path from the top-left to the bottom-right of a
    grid, moving only right or down.

    >>> min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
    7
    '''
    if not grid or not grid[0]:
        raise TypeError("""The grid does not contain the appropriate information""" )
    for cell_n in range(1 , len(grid[0] ) ):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]
    for row_n in range(1 , len(grid ) ):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row , row_above )
        row_above = grid[row_n]
    return grid[-1][-1]
def fill_row(current_row , row_above ) -> list:
    '''Accumulate the minimum reachable cost into each cell of the current row.'''
    current_row[0] += row_above[0]
    for cell_n in range(1 , len(current_row ) ):
        current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
| 700 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class _snake_case :
def __init__( self , SCREAMING_SNAKE_CASE_ = None):
'''simple docstring'''
if components is None:
lowercase__ : List[str] = []
lowercase__ : Dict = list(SCREAMING_SNAKE_CASE_)
def __len__( self):
'''simple docstring'''
return len(self.__components)
def __str__( self):
'''simple docstring'''
return "(" + ",".join(map(SCREAMING_SNAKE_CASE_ , self.__components)) + ")"
def __add__( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Optional[Any] = len(self)
if size == len(SCREAMING_SNAKE_CASE_):
lowercase__ : List[str] = [self.__components[i] + other.component(SCREAMING_SNAKE_CASE_) for i in range(SCREAMING_SNAKE_CASE_)]
return Vector(SCREAMING_SNAKE_CASE_)
else:
raise Exception("""must have the same size""")
def __sub__( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : List[Any] = len(self)
if size == len(SCREAMING_SNAKE_CASE_):
lowercase__ : Optional[Any] = [self.__components[i] - other.component(SCREAMING_SNAKE_CASE_) for i in range(SCREAMING_SNAKE_CASE_)]
return Vector(SCREAMING_SNAKE_CASE_)
else: # error case
raise Exception("""must have the same size""")
@overload
def __mul__( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
...
@overload
def __mul__( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
...
def __mul__( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE_ , (float, int)):
lowercase__ : Optional[int] = [c * other for c in self.__components]
return Vector(SCREAMING_SNAKE_CASE_)
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) and len(self) == len(SCREAMING_SNAKE_CASE_):
lowercase__ : Dict = len(self)
lowercase__ : Optional[Any] = [self.__components[i] * other.component(SCREAMING_SNAKE_CASE_) for i in range(SCREAMING_SNAKE_CASE_)]
return sum(SCREAMING_SNAKE_CASE_)
else: # error case
raise Exception("""invalid operand!""")
def lowercase__ ( self):
'''simple docstring'''
return Vector(self.__components)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) and -len(self.__components) <= i < len(self.__components):
return self.__components[i]
else:
raise Exception("""index out of range""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
assert -len(self.__components) <= pos < len(self.__components)
lowercase__ : List[Any] = value
def lowercase__ ( self):
'''simple docstring'''
if len(self.__components) == 0:
raise Exception("""Vector is empty""")
lowercase__ : Union[str, Any] = [c**2 for c in self.__components]
return math.sqrt(sum(SCREAMING_SNAKE_CASE_))
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = False):
'''simple docstring'''
lowercase__ : Union[str, Any] = self * other
lowercase__ : Optional[Any] = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den))
else:
return math.acos(num / den)
def UpperCamelCase ( lowercase_ ) -> Vector:
'''simple docstring'''
assert isinstance(lowercase_ , lowercase_ )
return Vector([0] * dimension )
def UpperCamelCase ( lowercase_ , lowercase_ ) -> Vector:
'''simple docstring'''
assert isinstance(lowercase_ , lowercase_ ) and (isinstance(lowercase_ , lowercase_ ))
lowercase__ : Union[str, Any] = [0] * dimension
lowercase__ : Any = 1
return Vector(lowercase_ )
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ ) -> Vector:
'''simple docstring'''
assert (
isinstance(lowercase_ , lowercase_ )
and isinstance(lowercase_ , lowercase_ )
and (isinstance(lowercase_ , (int, float) ))
)
return x * scalar + y
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ ) -> Vector:
'''simple docstring'''
random.seed(lowercase_ )
lowercase__ : int = [random.randint(lowercase_ , lowercase_ ) for _ in range(lowercase_ )]
return Vector(lowercase_ )
class _snake_case :
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : List[Any] = matrix
lowercase__ : Any = w
lowercase__ : Any = h
def __str__( self):
'''simple docstring'''
lowercase__ : str = """"""
for i in range(self.__height):
ans += "|"
for j in range(self.__width):
if j < self.__width - 1:
ans += str(self.__matrix[i][j]) + ","
else:
ans += str(self.__matrix[i][j]) + "|\n"
return ans
def __add__( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
if self.__width == other.width() and self.__height == other.height():
lowercase__ : Tuple = []
for i in range(self.__height):
lowercase__ : Tuple = [
self.__matrix[i][j] + other.component(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
for j in range(self.__width)
]
matrix.append(SCREAMING_SNAKE_CASE_)
return Matrix(SCREAMING_SNAKE_CASE_ , self.__width , self.__height)
else:
raise Exception("""matrix must have the same dimension!""")
def __sub__( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
if self.__width == other.width() and self.__height == other.height():
lowercase__ : Optional[int] = []
for i in range(self.__height):
lowercase__ : List[str] = [
self.__matrix[i][j] - other.component(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
for j in range(self.__width)
]
matrix.append(SCREAMING_SNAKE_CASE_)
return Matrix(SCREAMING_SNAKE_CASE_ , self.__width , self.__height)
else:
raise Exception("""matrices must have the same dimension!""")
@overload
def __mul__( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
...
@overload
def __mul__( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
...
def __mul__( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): # matrix-vector
if len(SCREAMING_SNAKE_CASE_) == self.__width:
lowercase__ : List[Any] = zero_vector(self.__height)
for i in range(self.__height):
lowercase__ : Union[str, Any] = [
self.__matrix[i][j] * other.component(SCREAMING_SNAKE_CASE_)
for j in range(self.__width)
]
ans.change_component(SCREAMING_SNAKE_CASE_ , sum(SCREAMING_SNAKE_CASE_))
return ans
else:
raise Exception(
"""vector must have the same size as the """
"""number of columns of the matrix!""")
elif isinstance(SCREAMING_SNAKE_CASE_ , (int, float)): # matrix-scalar
lowercase__ : Tuple = [
[self.__matrix[i][j] * other for j in range(self.__width)]
for i in range(self.__height)
]
return Matrix(SCREAMING_SNAKE_CASE_ , self.__width , self.__height)
return None
def lowercase__ ( self):
'''simple docstring'''
return self.__height
def lowercase__ ( self):
'''simple docstring'''
return self.__width
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception("""change_component: indices out of bounds""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
if 0 <= x < self.__height and 0 <= y < self.__width:
lowercase__ : Tuple = value
else:
raise Exception("""change_component: indices out of bounds""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
if self.__height != self.__width:
raise Exception("""Matrix is not square""")
lowercase__ : List[Any] = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(SCREAMING_SNAKE_CASE_)):
lowercase__ : List[str] = minor[i][:y] + minor[i][y + 1 :]
return Matrix(SCREAMING_SNAKE_CASE_ , self.__width - 1 , self.__height - 1).determinant()
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
if self.__height != self.__width:
raise Exception("""Matrix is not square""")
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
else:
raise Exception("""Indices out of bounds""")
def lowercase__ ( self):
'''simple docstring'''
if self.__height != self.__width:
raise Exception("""Matrix is not square""")
if self.__height < 1:
raise Exception("""Matrix has no element""")
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
lowercase__ : Optional[int] = [
self.__matrix[0][y] * self.cofactor(0 , SCREAMING_SNAKE_CASE_) for y in range(self.__width)
]
return sum(SCREAMING_SNAKE_CASE_)
def square_zero_matrix(n: int) -> Matrix:
    """Return an n x n matrix of zeros."""
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    """Return a height x width matrix with random integer entries in [a, b]."""
    random.seed(42)  # fixed seed so results are reproducible
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
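

# --- Added usage sketch (not part of the original module) ---
# A quick demonstration of the Matrix API above; the matrix-vector path
# additionally assumes the module's own Vector/zero_vector helpers.
def _matrix_demo() -> None:
    m = Matrix([[1, 2], [3, 4]], 2, 2)
    print(m.determinant())  # 1*4 - 2*3 = -2
    print(m.minor(0, 0))  # determinant of [[4]] = 4
    print((m * 2).component(0, 1))  # scalar multiply, entry (0, 1) -> 4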
| 495 | 0 |
'''simple docstring'''
def matching_min_vertex_cover(graph: dict) -> set:
    """Greedy 2-approximation: repeatedly pick an arbitrary edge, add both of
    its endpoints to the cover, and drop every edge touching either one."""
    chosen_vertices = set()
    # edges = set of graph's edges
    edges = get_edges(graph)

    # While there are still elements in the edges set, take an arbitrary edge
    # (from_node, to_node), add both endpoints to chosen_vertices, and then
    # remove all edges adjacent to from_node or to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """Return the set of directed edges (from_node, to_node) of the graph."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 446 |
'''simple docstring'''
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Return True if the matrix equals its conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Return the Rayleigh quotient (v* A v) / (v* v) for a Hermitian matrix
    A and a nonzero vector v."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
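

# --- Added example (not part of the original module) ---
# For a Hermitian matrix A and an eigenvector v of A, the Rayleigh quotient
# recovers the corresponding eigenvalue exactly; a minimal check:
def _eigenvector_check() -> None:
    a = np.array([[2, 0], [0, 5]])
    v = np.array([[0], [1]])  # eigenvector of a with eigenvalue 5
    assert is_hermitian(a)
    assert rayleigh_quotient(a, v).item() == 5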
| 446 | 1 |
'''simple docstring'''
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    """Greedy fractional knapsack: return the maximum profit obtainable from
    the given item profits/weights under the weight limit."""
    if len(profit) != len(weight):
        raise ValueError('The length of profit and weight must be same.')
    if max_weight <= 0:
        raise ValueError('max_weight must be greater than zero.')
    if any(p < 0 for p in profit):
        raise ValueError('Profit can not be negative.')
    if any(w < 0 for w in weight):
        raise ValueError('Weight can not be negative.')

    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0

    # loop till the total weight does not reach the max limit (e.g. 15 kg) and i < length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1  # mark this item as used

        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight:
            # 1 === weight[index] / weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than the limit, take
            # only the required number of remaining kgs and calculate the
            # profit for it: weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
if __name__ == "__main__":
print(
'Input profits, weights, and then max_weight (all positive ints) separated by '
'spaces.'
)
    profit = [int(x) for x in input('Input profits separated by spaces: ').split()]
    weight = [int(x) for x in input('Input weights separated by spaces: ').split()]
    max_weight = int(input('Max weight allowed: '))
# Function Call
calc_profit(profit, weight, max_weight)
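

# --- Added reference sketch (not part of the original script) ---
# calc_profit above re-scans for the best remaining ratio with .index() on
# every iteration; an equivalent greedy that sorts the items once by
# profit/weight is shorter and O(n log n). A minimal sketch:
def fractional_knapsack(profit: list, weight: list, max_weight: int) -> float:
    items = sorted(zip(profit, weight), key=lambda pw: pw[0] / pw[1], reverse=True)
    gain, limit = 0.0, 0
    for p, w in items:
        if limit + w <= max_weight:
            limit += w
            gain += p  # take the whole item
        else:
            gain += (max_weight - limit) / w * p  # take the remaining fraction
            break
    return gain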
| 719 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features
    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
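

# --- Added usage sketch (not part of the original test module) ---
# The same round trip through the public `datasets` API, assuming recent
# releases that expose Dataset.to_parquet / Dataset.from_parquet:
def _parquet_roundtrip_example(tmp_path):
    ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
    ds.to_parquet(str(tmp_path / "example.parquet"))
    reloaded = Dataset.from_parquet(str(tmp_path / "example.parquet"))
    assert reloaded.column_names == ds.column_names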
| 443 | 0 |
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))'''))
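
# --- Added explanation (not part of the original program) ---
# The line above is a quine: the lambda receives the template string, and
# `quine % quine` substitutes the template's own repr for `%r` (while the
# doubled `%%` collapses to a single `%`), so the printed text equals the
# program's source. Equivalent check, assuming CPython repr quoting:
#
#   t = 'print((lambda quine: quine %% quine)(%r))'
#   assert (t % t) == "print((lambda quine: quine % quine)(" + repr(t) + "))"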
| 496 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size, length):
        scores = jnp.ones((batch_size, length)) / length
        return scores
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = None
SCREAMING_SNAKE_CASE_ : int = 20
SCREAMING_SNAKE_CASE_ : int = self._get_uniform_logits(batch_size=2 ,length=snake_case__ )
# tweak scores to not be uniform anymore
SCREAMING_SNAKE_CASE_ : Any = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
SCREAMING_SNAKE_CASE_ : Dict = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
SCREAMING_SNAKE_CASE_ : int = jax.nn.softmax(snake_case__ ,axis=-1 )
SCREAMING_SNAKE_CASE_ : Optional[int] = FlaxTemperatureLogitsWarper(temperature=0.5 )
SCREAMING_SNAKE_CASE_ : Dict = FlaxTemperatureLogitsWarper(temperature=1.3 )
SCREAMING_SNAKE_CASE_ : Any = jax.nn.softmax(temp_dist_warper_sharper(snake_case__ ,scores.copy() ,cur_len=snake_case__ ) ,axis=-1 )
SCREAMING_SNAKE_CASE_ : Optional[int] = jax.nn.softmax(temp_dist_warper_smoother(snake_case__ ,scores.copy() ,cur_len=snake_case__ ) ,axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_sharp[0, :] ,atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_smooth[0, :] ,atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() ,warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() ,warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() ,warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() ,warped_prob_smooth[1, :].min() )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = None
SCREAMING_SNAKE_CASE_ : Optional[Any] = 10
SCREAMING_SNAKE_CASE_ : int = 2
# create ramp distribution
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.broadcast_to(np.arange(snake_case__ )[None, :] ,(batch_size, vocab_size) ).copy()
SCREAMING_SNAKE_CASE_ : Optional[int] = ramp_logits[1:, : vocab_size // 2] + vocab_size
SCREAMING_SNAKE_CASE_ : List[str] = FlaxTopKLogitsWarper(3 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = top_k_warp(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() ,7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() ,2 * [True] + 3 * [False] + 5 * [True] )
# check special case
SCREAMING_SNAKE_CASE_ : int = 5
SCREAMING_SNAKE_CASE_ : Any = FlaxTopKLogitsWarper(top_k=1 ,filter_value=0.0 ,min_tokens_to_keep=3 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.broadcast_to(np.arange(snake_case__ )[None, :] ,(batch_size, length) ).copy()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = top_k_warp_safety_check(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() ,[2, 2] )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = None
SCREAMING_SNAKE_CASE_ : str = 10
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
SCREAMING_SNAKE_CASE_ : Any = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
SCREAMING_SNAKE_CASE_ : Dict = FlaxTopPLogitsWarper(0.8 )
SCREAMING_SNAKE_CASE_ : Optional[int] = np.exp(top_p_warp(snake_case__ ,snake_case__ ,cur_len=snake_case__ ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
SCREAMING_SNAKE_CASE_ : str = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(snake_case__ ,snake_case__ ,atol=1E-3 ) )
# check edge cases with negative and extreme logits
SCREAMING_SNAKE_CASE_ : List[Any] = np.broadcast_to(np.arange(snake_case__ )[None, :] ,(batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
SCREAMING_SNAKE_CASE_ : Any = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
SCREAMING_SNAKE_CASE_ : List[Any] = FlaxTopPLogitsWarper(0.9 ,min_tokens_to_keep=2 ,filter_value=0.0 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = top_p_warp(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() ,[3, 2] )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : int = 20
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 4
SCREAMING_SNAKE_CASE_ : str = 0
SCREAMING_SNAKE_CASE_ : str = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=snake_case__ )
# check that min length is applied at length 5
SCREAMING_SNAKE_CASE_ : Dict = ids_tensor((batch_size, 20) ,vocab_size=20 )
SCREAMING_SNAKE_CASE_ : Optional[int] = 5
SCREAMING_SNAKE_CASE_ : List[str] = self._get_uniform_logits(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = min_dist_processor(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() ,4 * [-float('inf' )] )
# check that min length is not applied anymore at length 15
SCREAMING_SNAKE_CASE_ : Optional[int] = self._get_uniform_logits(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : int = 15
SCREAMING_SNAKE_CASE_ : Any = min_dist_processor(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
self.assertFalse(jnp.isinf(snake_case__ ).any() )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = 20
SCREAMING_SNAKE_CASE_ : List[Any] = 4
SCREAMING_SNAKE_CASE_ : Dict = 0
SCREAMING_SNAKE_CASE_ : int = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=snake_case__ )
# check that all scores are -inf except the bos_token_id score
SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor((batch_size, 1) ,vocab_size=20 )
SCREAMING_SNAKE_CASE_ : Optional[int] = 1
SCREAMING_SNAKE_CASE_ : str = self._get_uniform_logits(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = logits_processor(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() ,4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
SCREAMING_SNAKE_CASE_ : Dict = 3
SCREAMING_SNAKE_CASE_ : Dict = self._get_uniform_logits(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = logits_processor(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
self.assertFalse(jnp.isinf(snake_case__ ).any() )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = 20
SCREAMING_SNAKE_CASE_ : Any = 4
SCREAMING_SNAKE_CASE_ : int = 0
SCREAMING_SNAKE_CASE_ : Tuple = 5
SCREAMING_SNAKE_CASE_ : List[Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=snake_case__ ,eos_token_id=snake_case__ )
# check that all scores are -inf except the eos_token_id when max_length is reached
SCREAMING_SNAKE_CASE_ : Any = ids_tensor((batch_size, 4) ,vocab_size=20 )
SCREAMING_SNAKE_CASE_ : List[Any] = 4
SCREAMING_SNAKE_CASE_ : Dict = self._get_uniform_logits(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = logits_processor(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() ,4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
SCREAMING_SNAKE_CASE_ : List[Any] = 3
SCREAMING_SNAKE_CASE_ : int = self._get_uniform_logits(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = logits_processor(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
self.assertFalse(jnp.isinf(snake_case__ ).any() )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = 4
SCREAMING_SNAKE_CASE_ : Dict = 10
SCREAMING_SNAKE_CASE_ : int = 15
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 2
SCREAMING_SNAKE_CASE_ : Optional[int] = 1
SCREAMING_SNAKE_CASE_ : Optional[Any] = 15
# dummy input_ids and scores
SCREAMING_SNAKE_CASE_ : Any = ids_tensor((batch_size, sequence_length) ,snake_case__ )
SCREAMING_SNAKE_CASE_ : int = input_ids.copy()
SCREAMING_SNAKE_CASE_ : str = self._get_uniform_logits(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = scores.copy()
# instantiate all dist processors
SCREAMING_SNAKE_CASE_ : int = FlaxTemperatureLogitsWarper(temperature=0.5 )
SCREAMING_SNAKE_CASE_ : int = FlaxTopKLogitsWarper(3 )
SCREAMING_SNAKE_CASE_ : Dict = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
SCREAMING_SNAKE_CASE_ : Any = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=snake_case__ ,eos_token_id=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = 10
# no processor list
SCREAMING_SNAKE_CASE_ : Optional[int] = temp_dist_warp(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
SCREAMING_SNAKE_CASE_ : List[str] = top_k_warp(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = top_p_warp(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
SCREAMING_SNAKE_CASE_ : str = min_dist_proc(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = bos_dist_proc(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = eos_dist_proc(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
# with processor list
SCREAMING_SNAKE_CASE_ : Union[str, Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
SCREAMING_SNAKE_CASE_ : Any = processor(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
# scores should be equal
self.assertTrue(jnp.allclose(snake_case__ ,snake_case__ ,atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 4
SCREAMING_SNAKE_CASE_ : Optional[Any] = 10
SCREAMING_SNAKE_CASE_ : Dict = 15
SCREAMING_SNAKE_CASE_ : Dict = 2
SCREAMING_SNAKE_CASE_ : Tuple = 1
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 15
# dummy input_ids and scores
SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor((batch_size, sequence_length) ,snake_case__ )
SCREAMING_SNAKE_CASE_ : int = input_ids.copy()
SCREAMING_SNAKE_CASE_ : List[Any] = self._get_uniform_logits(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = scores.copy()
# instantiate all dist processors
SCREAMING_SNAKE_CASE_ : str = FlaxTemperatureLogitsWarper(temperature=0.5 )
SCREAMING_SNAKE_CASE_ : List[str] = FlaxTopKLogitsWarper(3 )
SCREAMING_SNAKE_CASE_ : int = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
SCREAMING_SNAKE_CASE_ : Tuple = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=snake_case__ )
SCREAMING_SNAKE_CASE_ : str = FlaxForcedEOSTokenLogitsProcessor(max_length=snake_case__ ,eos_token_id=snake_case__ )
SCREAMING_SNAKE_CASE_ : List[str] = 10
# no processor list
def run_no_processor_list(snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : int = temp_dist_warp(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
SCREAMING_SNAKE_CASE_ : int = top_k_warp(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = top_p_warp(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = min_dist_proc(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = bos_dist_proc(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = eos_dist_proc(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
return scores
# with processor list
def run_processor_list(snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
SCREAMING_SNAKE_CASE_ : List[str] = processor(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
return scores
SCREAMING_SNAKE_CASE_ : Tuple = jax.jit(snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = jax.jit(snake_case__ )
SCREAMING_SNAKE_CASE_ : int = jitted_run_no_processor_list(snake_case__ ,snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : List[str] = jitted_run_processor_list(snake_case__ ,snake_case__ ,snake_case__ )
# scores should be equal
self.assertTrue(jnp.allclose(snake_case__ ,snake_case__ ,atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() )
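

# --- Added usage sketch (not part of the original test module) ---
# Outside the tests, the warpers are plain callables with the signature
# (input_ids, scores, cur_len); FlaxLogitsProcessorList applies them in
# order. Minimal sketch (4 sequences, vocab of 10):
def _processor_list_example():
    input_ids = ids_tensor((4, 5), vocab_size=10)
    scores = jnp.ones((4, 10)) / 10
    processors = FlaxLogitsProcessorList(
        [FlaxTemperatureLogitsWarper(temperature=0.7), FlaxTopKLogitsWarper(3)]
    )
    return processors(input_ids, scores, cur_len=5)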
| 105 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
lowerCAmelCase__ = """
Human: <<task>>
Assistant: """
lowerCAmelCase__ = """huggingface-tools/default-prompts"""
lowerCAmelCase__ = {"""chat""": """chat_prompt_template.txt""", """run""": """run_prompt_template.txt"""}
def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """Download and cache the prompt template from a repo, or return the raw
    prompt if plain text was passed in."""
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
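

# --- Added usage sketch (not part of the original module) ---
# Fetching the default "run" prompt template; raw prompt text (anything
# containing whitespace) is returned unchanged:
#
#   template = download_prompt(None, agent_name="my-agent", mode="run")
#   same_text = download_prompt("Summarize this file.", agent_name="my-agent")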
| 707 |
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Load a saved state dict, cast every tensor to fp16, and save it back."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path

    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
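
# --- Added usage note (not part of the original script) ---
# With `fire`, the function's parameters become CLI arguments, e.g.:
#
#   python fp16_convert.py pytorch_model.bin --save_path pytorch_model.fp16.bin
#
# (the script filename here is hypothetical; omitting --save_path overwrites
# the input file in place).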
| 626 | 0 |
'''simple docstring'''
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(line) for line in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length
        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)

    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
    """Return the list of MM-IMDB genre labels."""
    return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
    """ImageNet-style preprocessing pipeline for the ResNet image encoder."""
    return transforms.Compose(
[
transforms.Resize(2_5_6 ),
transforms.CenterCrop(2_2_4 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.4_6_7_7_7_0_4_4, 0.4_4_5_3_1_4_2_9, 0.4_0_6_6_1_0_1_7] , std=[0.1_2_2_2_1_9_9_4, 0.1_2_1_4_5_8_3_5, 0.1_4_3_8_0_4_6_9] , ),
] )
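

# --- Added usage sketch (not part of the original module) ---
# How the pieces above fit together with a DataLoader; `tokenizer` is assumed
# to be any HuggingFace tokenizer and "train.jsonl" a hypothetical data file:
#
#   from torch.utils.data import DataLoader
#   labels = get_mmimdb_labels()
#   dataset = JsonlDataset("train.jsonl", tokenizer, get_image_transforms(), labels, max_seq_length=512)
#   loader = DataLoader(dataset, batch_size=8, collate_fn=collate_fn)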
| 72 |
"""simple docstring"""
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3

    import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self,
        sql: Union[str, "sqlalchemy.sql.Selectable"],
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset


class SqlDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows

        return written
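

# --- Added usage sketch (not part of the original module) ---
# These classes back the public helpers Dataset.from_sql / Dataset.to_sql; a
# minimal sqlite round trip under that assumption:
#
#   import sqlite3
#   from datasets import Dataset
#   con = sqlite3.connect("example.db")
#   Dataset.from_dict({"a": [1, 2]}).to_sql("t", con)
#   ds = Dataset.from_sql("SELECT a FROM t", "sqlite:///example.db")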
| 139 | 0 |
"""simple docstring"""
def harmonic_series(n_term: str) -> list:
    """Return the first n terms of the harmonic series as strings:
    '1', '1/2', '1/3', ..."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series
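

# --- Added worked example (not part of the original script) ---
# harmonic_series("4") -> ['1', '1/2', '1/3', '1/4'], i.e. the first four
# terms of the sum 1 + 1/2 + 1/3 + 1/4.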
if __name__ == "__main__":
lowercase__ :Union[str, Any] = input('Enter the last number (nth term) of the Harmonic Series')
print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n')
print(harmonic_series(nth_term)) | 374 |
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1


class HooksModelTester(unittest.TestCase):
def A_ ( self : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : Dict = ModelForTest()
__UpperCAmelCase : Optional[int] = ModelHook()
add_hook_to_module(__lowercase , __lowercase )
self.assertEqual(test_model._hf_hook , __lowercase )
self.assertTrue(hasattr(__lowercase , '''_old_forward''' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['''x'''] )
remove_hook_from_module(__lowercase )
self.assertFalse(hasattr(__lowercase , '''_hf_hook''' ) )
self.assertFalse(hasattr(__lowercase , '''_old_forward''' ) )
def A_ ( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : str = ModelForTest()
__UpperCAmelCase : Tuple = ModelHook()
add_hook_to_module(__lowercase , __lowercase )
add_hook_to_module(__lowercase , __lowercase , append=__lowercase )
self.assertEqual(isinstance(test_model._hf_hook , __lowercase ) , __lowercase )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(__lowercase , '''_old_forward''' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['''x'''] )
remove_hook_from_module(__lowercase )
self.assertFalse(hasattr(__lowercase , '''_hf_hook''' ) )
self.assertFalse(hasattr(__lowercase , '''_old_forward''' ) )
def A_ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : Dict = ModelForTest()
__UpperCAmelCase : Tuple = torch.randn(2 , 3 )
__UpperCAmelCase : Optional[int] = test_model(x + 1 )
__UpperCAmelCase : Optional[Any] = test_model(x + 2 )
__UpperCAmelCase : Optional[int] = PreForwardHook()
add_hook_to_module(__lowercase , __lowercase )
__UpperCAmelCase : Union[str, Any] = test_model(__lowercase )
self.assertTrue(torch.allclose(__lowercase , __lowercase , atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
__UpperCAmelCase : int = PreForwardHook()
add_hook_to_module(__lowercase , __lowercase )
__UpperCAmelCase : Any = test_model(__lowercase )
self.assertTrue(torch.allclose(__lowercase , __lowercase , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
__UpperCAmelCase : Any = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(__lowercase , __lowercase )
__UpperCAmelCase : Tuple = test_model(__lowercase )
assert torch.allclose(__lowercase , __lowercase , atol=1e-5 )
def A_ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : int = ModelForTest()
__UpperCAmelCase : List[Any] = torch.randn(2 , 3 )
__UpperCAmelCase : Tuple = test_model(__lowercase )
__UpperCAmelCase : int = PostForwardHook()
add_hook_to_module(__lowercase , __lowercase )
__UpperCAmelCase : Optional[int] = test_model(__lowercase )
self.assertTrue(torch.allclose(__lowercase , output + 1 , atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
__UpperCAmelCase : str = PostForwardHook()
add_hook_to_module(__lowercase , __lowercase )
__UpperCAmelCase : List[str] = test_model(__lowercase )
self.assertTrue(torch.allclose(__lowercase , output + 1 , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
__UpperCAmelCase : Optional[int] = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(__lowercase , __lowercase )
__UpperCAmelCase : Dict = test_model(__lowercase )
assert torch.allclose(__lowercase , output + 2 , atol=1e-5 )
def A_ ( self : Dict ):
'''simple docstring'''
__UpperCAmelCase : List[str] = ModelForTest()
__UpperCAmelCase : Union[str, Any] = torch.randn(2 , 3 )
__UpperCAmelCase : str = test_model(__lowercase )
__UpperCAmelCase : Union[str, Any] = PostForwardHook()
add_hook_to_module(__lowercase , __lowercase )
__UpperCAmelCase : Optional[int] = test_model(__lowercase )
self.assertTrue(torch.allclose(__lowercase , output + 1 ) )
self.assertTrue(outputa.requires_grad )
__UpperCAmelCase : Optional[Any] = True
__UpperCAmelCase : int = test_model(__lowercase )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def A_ ( self : List[str] ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
__UpperCAmelCase : Dict = torch.randn(2 , 3 )
__UpperCAmelCase : Any = model(__lowercase )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(__lowercase , AlignDevicesHook(io_same_device=__lowercase ) )
__UpperCAmelCase : List[Any] = torch.randn(2 , 3 ).to(0 )
__UpperCAmelCase : int = model(__lowercase )
self.assertEqual(output.device , torch.device(0 ) )
def A_ ( self : Tuple ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
__UpperCAmelCase : Tuple = {'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''', '''offload''': True}
add_hook_to_module(model.lineara , AlignDevicesHook(**__lowercase ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__lowercase ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__lowercase ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
# Buffers are not included in the offload by default, so are on the execution device
__UpperCAmelCase : Optional[int] = torch.device(hook_kwargs['''execution_device'''] )
self.assertEqual(model.batchnorm.running_mean.device , __lowercase )
__UpperCAmelCase : int = torch.randn(2 , 3 )
__UpperCAmelCase : Optional[int] = model(__lowercase )
self.assertEqual(output.device , __lowercase )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# Now test with buffers included in the offload
__UpperCAmelCase : str = {
'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''',
'''offload''': True,
'''offload_buffers''': True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**__lowercase ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__lowercase ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__lowercase ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) )
__UpperCAmelCase : Optional[Any] = torch.randn(2 , 3 )
__UpperCAmelCase : List[Any] = model(__lowercase )
self.assertEqual(output.device , __lowercase )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
def A_ ( self : List[str] ):
'''simple docstring'''
__UpperCAmelCase : List[str] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
__UpperCAmelCase : Optional[Any] = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(__lowercase , execution_device=__lowercase , offload=__lowercase )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
# Buffers are not included in the offload by default, so are on the execution device
__UpperCAmelCase : Dict = torch.device(__lowercase )
self.assertEqual(model.batchnorm.running_mean.device , __lowercase )
__UpperCAmelCase : Optional[int] = torch.randn(2 , 3 )
__UpperCAmelCase : Dict = model(__lowercase )
self.assertEqual(output.device , __lowercase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__lowercase )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# Now test with buffers included in the offload
attach_align_device_hook(__lowercase , execution_device=__lowercase , offload=__lowercase , offload_buffers=__lowercase )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) )
__UpperCAmelCase : Dict = torch.randn(2 , 3 )
__UpperCAmelCase : str = model(__lowercase )
self.assertEqual(output.device , __lowercase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__lowercase )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
def A_ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : Dict = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
__UpperCAmelCase : str = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(
__lowercase , execution_device=__lowercase , offload=__lowercase , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
# Buffers are not included in the offload by default, so are on the execution device
__UpperCAmelCase : Optional[Any] = torch.device(__lowercase )
self.assertEqual(model.batchnorm.running_mean.device , __lowercase )
__UpperCAmelCase : Any = torch.randn(2 , 3 )
__UpperCAmelCase : Dict = model(__lowercase )
self.assertEqual(output.device , __lowercase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__lowercase )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# Now test with buffers included in the offload
attach_align_device_hook(
__lowercase , execution_device=__lowercase , offload=__lowercase , weights_map=model.state_dict() , offload_buffers=__lowercase , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) )
__UpperCAmelCase : List[str] = torch.randn(2 , 3 )
__UpperCAmelCase : Optional[int] = model(__lowercase )
self.assertEqual(output.device , __lowercase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__lowercase )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
        self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
| 374 | 1 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class snake_case (unittest.TestCase ):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def test_canny(self):
lowercase__ , lowercase__ = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-canny" ,from_pt=UpperCAmelCase_ ,dtype=jnp.bfloataa )
lowercase__ , lowercase__ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" ,controlnet=UpperCAmelCase_ ,from_pt=UpperCAmelCase_ ,dtype=jnp.bfloataa )
lowercase__ = controlnet_params
lowercase__ = "bird"
lowercase__ = jax.device_count()
lowercase__ = pipe.prepare_text_inputs([prompts] * num_samples )
lowercase__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" )
lowercase__ = pipe.prepare_image_inputs([canny_image] * num_samples )
lowercase__ = jax.random.PRNGKey(0 )
lowercase__ = jax.random.split(UpperCAmelCase_ ,jax.device_count() )
lowercase__ = replicate(UpperCAmelCase_ )
lowercase__ = shard(UpperCAmelCase_ )
lowercase__ = shard(UpperCAmelCase_ )
lowercase__ = pipe(
prompt_ids=UpperCAmelCase_ ,image=UpperCAmelCase_ ,params=UpperCAmelCase_ ,prng_seed=UpperCAmelCase_ ,num_inference_steps=50 ,jit=UpperCAmelCase_ ,).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
lowercase__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowercase__ = images[0, 253:256, 253:256, -1]
lowercase__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowercase__ = jnp.array(
[0.16_79_69, 0.11_66_99, 0.08_15_43, 0.15_42_97, 0.13_28_12, 0.10_88_87, 0.16_99_22, 0.16_99_22, 0.20_50_78] )
print(F'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
    def test_pose(self):
lowercase__ , lowercase__ = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-openpose" ,from_pt=UpperCAmelCase_ ,dtype=jnp.bfloataa )
lowercase__ , lowercase__ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" ,controlnet=UpperCAmelCase_ ,from_pt=UpperCAmelCase_ ,dtype=jnp.bfloataa )
lowercase__ = controlnet_params
lowercase__ = "Chef in the kitchen"
lowercase__ = jax.device_count()
lowercase__ = pipe.prepare_text_inputs([prompts] * num_samples )
lowercase__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" )
lowercase__ = pipe.prepare_image_inputs([pose_image] * num_samples )
lowercase__ = jax.random.PRNGKey(0 )
lowercase__ = jax.random.split(UpperCAmelCase_ ,jax.device_count() )
lowercase__ = replicate(UpperCAmelCase_ )
lowercase__ = shard(UpperCAmelCase_ )
lowercase__ = shard(UpperCAmelCase_ )
lowercase__ = pipe(
prompt_ids=UpperCAmelCase_ ,image=UpperCAmelCase_ ,params=UpperCAmelCase_ ,prng_seed=UpperCAmelCase_ ,num_inference_steps=50 ,jit=UpperCAmelCase_ ,).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
lowercase__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowercase__ = images[0, 253:256, 253:256, -1]
lowercase__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowercase__ = jnp.array(
[[0.27_14_84, 0.26_17_19, 0.27_53_91, 0.27_73_44, 0.27_92_97, 0.29_10_16, 0.29_49_22, 0.30_27_34, 0.30_27_34]] )
print(F'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
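

# --- Added note (not part of the original test module) ---
# Both tests use the same multi-device pattern: `replicate(params)` copies the
# parameter pytree onto every local device, while `shard(x)` reshapes the
# leading batch axis to (num_devices, batch // num_devices) so the jitted
# pipeline call can split work across devices. Minimal sketch of what shard()
# effectively does:
#
#   x = jnp.zeros((jax.device_count() * 2, 3))
#   x = x.reshape(jax.device_count(), -1, 3)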
| 267 |
'''simple docstring'''
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Return the falling product u(u-1)(u-2)...(u-p+1) used by Newton's
    forward-difference formula."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")
if __name__ == "__main__":
main()
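

# --- Added reference (not part of the original script) ---
# main() evaluates Newton's forward-difference formula
#   f(x0 + u*h) ~= y0 + u*Dy0 + u(u-1)/2! * D^2y0 + ...
# where ucal(u, i) is the falling product u(u-1)...(u-i+1). A compact helper
# taking the first row of the difference table [y0, Dy0, D^2y0, ...]:
def newton_forward(u: float, diffs: list) -> float:
    return diffs[0] + sum(ucal(u, i) * diffs[i] / math.factorial(i) for i in range(1, len(diffs)))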
| 267 | 1 |
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
    """Abstract base class for all constraints applied during generation. A
    subclass defines how to advance, test, and reset its state; `test()`
    sanity-checks any implementation end to end."""

    def __init__(self):
        # test for the above condition
        self.test()

    def test(self):
        """Run the constraint to completion to verify it is defined correctly."""
        counter = 0
        completed = False

        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true."
                )

            stepped, completed, reset = self.update(advance)
            counter += 1

            if counter > 10_000:
                raise Exception("update() does not fulfill the constraint.")

        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly.")

    @abstractmethod
    def advance(self):
        """Return a token that would take this constraint one step closer to fulfillment."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def does_advance(self, token_id: int):
        """Return whether `token_id` advances this constraint."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def update(self, token_id: int):
        """Consume `token_id` and return (stepped, completed, reset)."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def reset(self):
        """Reset the constraint's progress."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def remaining(self):
        """Return how many steps are left until fulfillment."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def copy(self, stateful=False):
        """Return a copy of this constraint, optionally carrying its state."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class PhrasalConstraint(Constraint):
    """A constraint forcing a given sequence of token ids to appear in the output."""

    def __init__(self, token_ids: List[int]):
        super(Constraint, self).__init__()

        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")

        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        if self.completed:
            return False

        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)

        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed

        return new_constraint
class DisjunctiveTrie:
    """A trie over several candidate token-id sequences, used by DisjunctiveConstraint."""

    def __init__(self, nested_token_ids: List[List[int]], no_subsets=True):
        self.max_height = max([len(one) for one in nested_token_ids])

        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]

        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f" {nested_token_ids}."
            )

        self.trie = root

    def next_tokens(self, current_seq):
        """The next possible tokens that will progress the trie, given the tokens in `current_seq`."""
        start = self.trie

        for current_token in current_seq:
            start = start[current_token]

        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf(self, current_seq):
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        """Returns whether the number of leaves differs from the number of words, i.e. some word is a subset of another."""
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
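# Quick illustration of the trie semantics (token ids are made up): two
# branches sharing the prefix [1, 2] diverge at depth 2.
_trie_demo = DisjunctiveTrie([[1, 2, 3], [1, 2, 4]])
assert sorted(_trie_demo.next_tokens([1, 2])) == [3, 4]
assert _trie_demo.reached_leaf([1, 2, 3])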
class DisjunctiveConstraint(Constraint):
    """A constraint that is fulfilled once any one of several candidate token-id sequences appears."""

    def __init__(self, nested_token_ids: List[List[int]]):
        super(Constraint, self).__init__()

        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
            )

        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids

        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        next_tokens = self.trie.next_tokens(self.current_seq)
        return token_id in next_tokens

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()

        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed

        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)

        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed

        return new_constraint
class ConstraintListState:
    """Tracks a beam's progress through a list of constraints."""

    def __init__(self, constraints: List[Constraint]):
        self.constraints = constraints

        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False

        self.init_state()

    def init_state(self):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()

        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        """The list of tokens that can make progress on the constraints."""
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids: Optional[List[int]]):
        """Replays the tokens generated so far to reset the progress through the constraints."""
        self.init_state()

        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)

                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")

        complete, stepped = False, False

        if self.completed:
            complete = True
            stepped = False
            return complete, stepped

        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress toward the
            # current job, simply update the state.
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #    e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #    But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #    constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None

            if complete:
                # 2. If the next token completes the constraint, move it to completed list, set
                #    inprogress to None. If there are no pending constraints either, then this full list of constraints
                #    is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None

                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True

        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step toward any of the
            # constraints in our list?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)

                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true."
                        )

                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None

                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint

                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )

                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True

                        break  # prevent accidentally stepping through multiple constraints with just one token.

        return complete, stepped

    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we never actually touch the self.constraints objects
        # throughout this process, so they remain in their initialization state.

        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]

        return new_state
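# Illustrative usage of the state machine above (token ids are made up): force
# the phrase [5, 7, 9] to appear, feeding generated tokens one at a time.
_constraint = PhrasalConstraint([5, 7, 9])
_state = ConstraintListState([_constraint])
for _token_id in [5, 7, 9]:
    _complete, _stepped = _state.add(_token_id)
assert _state.completed and _state.get_bank() == 3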
| 292 |
"""simple docstring"""
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
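# Aside (assumed public datasets API, not part of this test file): outside the
# test suite, the same reader is reached through load_dataset with the "text"
# builder. Self-contained sketch given a writable tmp_path directory:
def _demo_load_text(tmp_path):
    from datasets import load_dataset

    p = tmp_path / "demo.txt"
    p.write_text("line one\nline two\n")
    ds = load_dataset("text", data_files={"train": str(p)}, split="train")
    assert ds.column_names == ["text"]
    return ds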
| 292 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
'bert-base-german-cased': 512,
'bert-large-uncased-whole-word-masking': 512,
'bert-large-cased-whole-word-masking': 512,
'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
'bert-large-cased-whole-word-masking-finetuned-squad': 512,
'bert-base-cased-finetuned-mrpc': 512,
'bert-base-german-dbmdz-cased': 512,
'bert-base-german-dbmdz-uncased': 512,
'TurkuNLP/bert-base-finnish-cased-v1': 512,
'TurkuNLP/bert-base-finnish-uncased-v1': 512,
'wietsedv/bert-base-dutch-cased': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" BERT tokenizer, backed by HuggingFace's *tokenizers* library.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
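# Illustrative usage of the class above (an aside, not part of the original
# file; requires network access to download the mapped checkpoint):
def _demo_tokenizer():
    tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
    ids = tok("hello world")["input_ids"]
    assert ids == [101, 7592, 2088, 102]  # [CLS] hello world [SEP]
    return tok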
| 665 |
'''simple docstring'''
import functools
def mincost_tickets(days: list[int], costs: list[int]) -> int:
    """Return the minimum cost to travel on every day in `days`, given 1-, 7- and 30-day pass prices in `costs`."""
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
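# Worked example (illustrative): with travel days {1, 4, 6, 7, 8, 20} and pass
# prices [2, 7, 15] for 1/7/30 days, the optimum is 11: a 1-day pass for day 1,
# a 7-day pass covering days 4-8, and a 1-day pass for day 20.
assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11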
if __name__ == "__main__":
import doctest
doctest.testmod()
| 665 | 1 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
    from .pipeline_shap_e_img2img import ShapEImg2ImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
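# Usage sketch for the exported pipeline (an illustrative aside, not part of
# this __init__ module; it downloads the public "openai/shap-e" checkpoint):
#
#     import torch
#     from diffusers import ShapEPipeline
#     pipe = ShapEPipeline.from_pretrained("openai/shap-e", torch_dtype=torch.float16).to("cuda")
#     images = pipe("a shark", guidance_scale=15.0, num_inference_steps=64, frame_size=256).images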
| 35 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info


def main() -> int:
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0
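# Illustrative aside: the `accelerate env` console command reaches main() above;
# the same report can also be produced programmatically with no CLI arguments.
def _demo_env():
    parser = env_command_parser()
    return env_command(parser.parse_args([]))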
if __name__ == "__main__":
raise SystemExit(main())
| 35 | 1 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
    """A single training/test example for token classification."""

    guid: str
    words: List[str]
    labels: Optional[List[str]]
@dataclass
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None
class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"
class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError
    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes outputs "nothing" ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]

            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features
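# Standalone illustration of the first-subtoken labeling rule implemented above
# (hypothetical names, not part of the original file): only a word's first
# wordpiece keeps the real label id; continuation pieces get pad_token_label_id
# so the loss ignores them.
def _align_word_labels(word_tokens_per_word, label_ids_per_word, pad_token_label_id=-100):
    tokens, label_ids = [], []
    for word_tokens, label_id in zip(word_tokens_per_word, label_ids_per_word):
        if len(word_tokens) > 0:
            tokens.extend(word_tokens)
            label_ids.extend([label_id] + [pad_token_label_id] * (len(word_tokens) - 1))
    return tokens, label_ids

# _align_word_labels([["jack", "##son", "##ville"]], [3]) -> (["jack", "##son", "##ville"], [3, -100, -100])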
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        # Use the cross entropy ignore_index as the padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(self, token_classification_task: TokenClassificationTask, data_dir: str,
                     tokenizer: PreTrainedTokenizer, labels: List[str], model_type: str,
                     max_seq_length: Optional[int] = None, overwrite_cache=False, mode: Split = Split.train):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir, "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length))
            )

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples, labels, max_seq_length, tokenizer,
                        cls_token_at_end=bool(model_type in ["xlnet"]),
                        cls_token=tokenizer.cls_token,
                        cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                        sep_token=tokenizer.sep_token,
                        sep_token_extra=False,
                        pad_on_left=bool(tokenizer.padding_side == "left"),
                        pad_token=tokenizer.pad_token_id,
                        pad_token_segment_id=tokenizer.pad_token_type_id,
                        pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        pad_token_label_id: int = -100
        # Use the cross entropy ignore_index as the padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(self, token_classification_task: TokenClassificationTask, data_dir: str,
                     tokenizer: PreTrainedTokenizer, labels: List[str], model_type: str,
                     max_seq_length: Optional[int] = None, overwrite_cache=False, mode: Split = Split.train):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples, labels, max_seq_length, tokenizer,
                cls_token_at_end=bool(model_type in ["xlnet"]),
                cls_token=tokenizer.cls_token,
                cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                sep_token=tokenizer.sep_token,
                sep_token_extra=False,
                pad_on_left=bool(tokenizer.padding_side == "left"),
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id,
                pad_token_label_id=self.pad_token_label_id,
            )

            def gen():
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )

            if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                    (
                        {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ),
                )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
                    (
                        {
                            "input_ids": tf.TensorShape([None]),
                            "attention_mask": tf.TensorShape([None]),
                            "token_type_ids": tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ),
                )

        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
| 179 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class TrainingArguments:
    """Configuration for training model."""

    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be trained."}
    )
    save_dir: Optional[str] = field(
        default="./", metadata={"help": "Save dir where model repo is cloned and models updates are saved to."}
    )
    dataset_name_train: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path of training dataset."}
    )
    dataset_name_valid: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    train_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for training."})
    valid_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for evaluation."})
    weight_decay: Optional[float] = field(default=0.1, metadata={"help": "Value of weight decay."})
    shuffle_buffer: Optional[int] = field(
        default=10_000, metadata={"help": "Size of buffer used to shuffle streaming dataset."}
    )
    learning_rate: Optional[float] = field(default=2e-4, metadata={"help": "Learning rate for training."})
    lr_scheduler_type: Optional[str] = field(default="cosine", metadata={"help": "Learning rate scheduler type."})
    num_warmup_steps: Optional[int] = field(
        default=750, metadata={"help": "Number of warmup steps in the learning rate schedule."}
    )
    gradient_accumulation_steps: Optional[int] = field(
        default=16, metadata={"help": "Number of gradient accumulation steps."}
    )
    gradient_checkpointing: Optional[bool] = field(
        default=True, metadata={"help": "Use gradient checkpointing to reduce memory footprint."}
    )
    max_train_steps: Optional[int] = field(default=50_000, metadata={"help": "Maximum number of training steps."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1024, metadata={"help": "Sequence lengths used for training."})
    seed: Optional[int] = field(default=1, metadata={"help": "Training seed."})
    save_checkpoint_steps: Optional[int] = field(
        default=1024,
        metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."},
    )
    resume_from_checkpoint: Optional[str] = field(
        default=None, metadata={"help": "States path if the training should continue from a checkpoint folder."}
    )
    tokenized: Optional[bool] = field(default=False, metadata={"help": "If True the data is pretokenized."})
@dataclass
class EvaluationArguments:
    """Configuration for evaluating model."""

    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size used for evaluation."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1024, metadata={"help": "Length of sequences to be evaluated."})
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})
@dataclass
class HumanEvalArguments:
    """Configuration for running evaluation on the HumanEval dataset."""

    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."})
    num_tasks: Optional[int] = field(
        default=None,
        metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."},
    )
    do_sample: Optional[bool] = field(
        default=True, metadata={"help": "Sample from the language model's output distribution."}
    )
    temperature: Optional[float] = field(default=0.2, metadata={"help": "Sampling temperature used for generation."})
    max_new_tokens: Optional[int] = field(default=256, metadata={"help": "Maximum number of newly generated tokens."})
    top_k: Optional[int] = field(default=0, metadata={"help": "Top-k parameter used for generation."})
    top_p: Optional[float] = field(default=0.95, metadata={"help": "Top-p parameter used for nucleus sampling."})
    batch_size: Optional[int] = field(default=10, metadata={"help": "Number of generations to run in parallel."})
    n_samples: Optional[int] = field(
        default=200, metadata={"help": "Number of completions to generate for each sample."}
    )
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})
    output_file: Optional[str] = field(
        default="eval_results.json", metadata={"help": "Name of the output file for the evaluation results."}
    )
    HF_ALLOW_CODE_EVAL: Optional[str] = field(
        default="0", metadata={"help": "Allow `code_eval` to execute Python code on machine."}
    )
    device_int: Optional[int] = field(
        default=-1,
        metadata={
            "help": (
                "Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
                " number corresponds to which GPU device id to run on."
            )
        },
    )
@dataclass
class PreprocessingArguments:
    """Configuration for preprocessing data."""

    num_workers: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
        },
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot", metadata={"help": "Folder or name of dataset to process."}
    )
    output_dir: Optional[str] = field(
        default="codeparrot-clean", metadata={"help": "Folder to save the processed dataset."}
    )
    samples_per_file: Optional[int] = field(
        default=100_000, metadata={"help": "Number of files to save per JSON output file."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    line_max: Optional[float] = field(
        default=1000, metadata={"help": "Maximum line length in file, otherwise file is filtered."}
    )
    line_mean: Optional[float] = field(
        default=100, metadata={"help": "Maximum mean line length in file, otherwise file is filtered."}
    )
    alpha_frac: Optional[float] = field(
        default=0.25, metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."}
    )
    min_token_ratio: Optional[float] = field(
        default=1.5, metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."}
    )
    filter_proba: Optional[float] = field(
        default=0.7, metadata={"help": "Probability for filtering config, test and uncommon files."}
    )
    tokenizer: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Name or path to the tokenizer."}
    )
    near_deduplication: Optional[bool] = field(
        default=False, metadata={"help": "If True, near-duplicate samples are removed."}
    )
    jaccard_threshold: Optional[float] = field(
        default=0.85, metadata={"help": "Jaccard threshold for near-duplicate samples."}
    )
@dataclass
class TokenizerTrainingArguments:
    """Configuration for tokenizer training."""

    base_tokenizer: Optional[str] = field(
        default="gpt2", metadata={"help": "Base tokenizer to build new tokenizer from."}
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot-train", metadata={"help": "Dataset to train tokenizer on."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    n_examples: Optional[int] = field(
        default=200_000, metadata={"help": "Number of examples to train tokenizer on."}
    )
    vocab_size: Optional[int] = field(
        default=32_768, metadata={"help": "Target vocabulary size of the new tokenizer."}
    )
    tokenizer_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of new tokenizer."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved tokenizer to the hub."})
@dataclass
class PretokenizationArguments:
    """Configuration for data pretokenization."""

    tokenizer_dir: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Name or path to the tokenizer."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path to the dataset to pretokenize."}
    )
    tokenized_data_repo: Optional[str] = field(
        default="tokenized-codeparrot-train", metadata={"help": "Repo name of the pretokenized data."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for pretokenization."})
@dataclass
class InitializationArguments:
    """Configuration for initializing new model."""

    config_name: Optional[str] = field(
        default="gpt2-large", metadata={"help": "Configuration to use for model initialization."}
    )
    tokenizer_name: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Tokenizer attached to model."}
    )
    model_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of the created model."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved model to the hub."})
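# Illustrative aside (assumes the standard transformers HfArgumentParser API):
# these dataclasses are designed to be filled from the command line.
def _parse_training_args(argv=None):
    from transformers import HfArgumentParser

    parser = HfArgumentParser(TrainingArguments)
    return parser.parse_args_into_dataclasses(args=argv)[0]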
| 179 | 1 |
'''simple docstring'''
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    # Take the file name (e.g. "Abyssinian_1.jpg") and strip the trailing index to get the class name.
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]


class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def a ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : Tuple ) -> Tuple:
# Initialize accelerator
if args.with_tracking:
snake_case__ =Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir )
else:
snake_case__ =Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case__ =config['lr']
snake_case__ =int(config['num_epochs'] )
snake_case__ =int(config['seed'] )
snake_case__ =int(config['batch_size'] )
snake_case__ =config['image_size']
if not isinstance(UpperCamelCase_ , (list, tuple) ):
snake_case__ =(image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , 'isdigit' ):
if args.checkpointing_steps == "epoch":
snake_case__ =args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
snake_case__ =int(args.checkpointing_steps )
else:
raise ValueError(
f"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" )
else:
snake_case__ =None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
snake_case__ =os.path.split(UpperCamelCase_ )[-1].split('.' )[0]
accelerator.init_trackers(UpperCamelCase_ , UpperCamelCase_ )
# Grab all the image filenames
snake_case__ =[os.path.join(args.data_dir , UpperCamelCase_ ) for fname in os.listdir(args.data_dir ) if fname.endswith('.jpg' )]
# Build the label correspondences
snake_case__ =[extract_label(UpperCamelCase_ ) for fname in file_names]
snake_case__ =list(set(UpperCamelCase_ ) )
id_to_label.sort()
snake_case__ ={lbl: i for i, lbl in enumerate(UpperCamelCase_ )}
# Set the seed before splitting the data.
np.random.seed(UpperCamelCase_ )
torch.manual_seed(UpperCamelCase_ )
torch.cuda.manual_seed_all(UpperCamelCase_ )
# Split our filenames between train and validation
snake_case__ =np.random.permutation(len(UpperCamelCase_ ) )
snake_case__ =int(0.8 * len(UpperCamelCase_ ) )
snake_case__ =random_perm[:cut]
snake_case__ =random_perm[cut:]
# For training we use a simple RandomResizedCrop
snake_case__ =Compose([RandomResizedCrop(UpperCamelCase_ , scale=(0.5, 1.0) ), ToTensor()] )
snake_case__ =PetsDataset(
[file_names[i] for i in train_split] , image_transform=UpperCamelCase_ , label_to_id=UpperCamelCase_ )
# For evaluation, we use a deterministic Resize
snake_case__ =Compose([Resize(UpperCamelCase_ ), ToTensor()] )
snake_case__ =PetsDataset([file_names[i] for i in eval_split] , image_transform=UpperCamelCase_ , label_to_id=UpperCamelCase_ )
# Instantiate dataloaders.
snake_case__ =DataLoader(UpperCamelCase_ , shuffle=UpperCamelCase_ , batch_size=UpperCamelCase_ , num_workers=4 )
snake_case__ =DataLoader(UpperCamelCase_ , shuffle=UpperCamelCase_ , batch_size=UpperCamelCase_ , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
snake_case__ =create_model('resnet50d' , pretrained=UpperCamelCase_ , num_classes=len(UpperCamelCase_ ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
snake_case__ =model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
snake_case__ =False
for param in model.get_classifier().parameters():
snake_case__ =True
# We normalize the batches of images to be a bit faster.
snake_case__ =torch.tensor(model.default_cfg['mean'] )[None, :, None, None].to(accelerator.device )
snake_case__ =torch.tensor(model.default_cfg['std'] )[None, :, None, None].to(accelerator.device )
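    # Both statistics are reshaped to (1, C, 1, 1) so they broadcast cleanly
    # over (N, C, H, W) image batches.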
# Instantiate optimizer
snake_case__ =torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
snake_case__ =OneCycleLR(optimizer=UpperCamelCase_ , max_lr=UpperCamelCase_ , epochs=UpperCamelCase_ , steps_per_epoch=len(UpperCamelCase_ ) )
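    # Starting Adam at lr / 25 matches OneCycleLR's default div_factor of 25:
    # the scheduler drives the learning rate from max_lr / 25 up to max_lr and
    # back down over the course of training.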
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ =accelerator.prepare(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
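    # After `prepare`, the model is wrapped for the current setup (e.g. DDP on
    # multi-GPU), the dataloaders are sharded across processes, and the
    # optimizer steps correctly under mixed-precision gradient scaling.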
# We need to keep track of how many total steps we have iterated over
snake_case__ =0
# We also need to keep track of the starting epoch so files are named properly
snake_case__ =0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None and args.resume_from_checkpoint != "":
accelerator.print(f"""Resumed from checkpoint: {args.resume_from_checkpoint}""" )
accelerator.load_state(args.resume_from_checkpoint )
snake_case__ =os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
snake_case__ =[f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
snake_case__ =dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
snake_case__ =os.path.splitext(UpperCamelCase_ )[0]
if "epoch" in training_difference:
snake_case__ =int(training_difference.replace('epoch_' , '' ) ) + 1
snake_case__ =None
else:
snake_case__ =int(training_difference.replace('step_' , '' ) )
snake_case__ =resume_step // len(UpperCamelCase_ )
resume_step -= starting_epoch * len(UpperCamelCase_ )
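                # `resume_step` counts batches: full epochs already completed
                # become `starting_epoch`, and the remainder is how many
                # batches to skip inside the resumed epoch.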
# Now we train the model
for epoch in range(UpperCamelCase_ , UpperCamelCase_ ):
model.train()
if args.with_tracking:
snake_case__ =0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
snake_case__ =accelerator.skip_first_batches(UpperCamelCase_ , UpperCamelCase_ )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
snake_case__ =train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
snake_case__ ={k: v.to(accelerator.device ) for k, v in batch.items()}
snake_case__ =(batch['image'] - mean) / std
snake_case__ =model(UpperCamelCase_ )
snake_case__ =torch.nn.functional.cross_entropy(UpperCamelCase_ , batch['label'] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(UpperCamelCase_ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
snake_case__ =f"""step_{overall_step}"""
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
snake_case__ =os.path.join(args.output_dir , UpperCamelCase_ )
accelerator.save_state(UpperCamelCase_ )
model.eval()
snake_case__ =0
snake_case__ =0
for step, batch in enumerate(UpperCamelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
snake_case__ ={k: v.to(accelerator.device ) for k, v in batch.items()}
snake_case__ =(batch['image'] - mean) / std
with torch.no_grad():
snake_case__ =model(UpperCamelCase_ )
snake_case__ =outputs.argmax(dim=-1 )
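        # `gather_for_metrics` collects predictions and labels from all
        # processes and drops the samples duplicated by the sampler at the end
        # of the dataset, so the accuracy computed below is exact.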
snake_case__ , snake_case__ =accelerator.gather_for_metrics((predictions, batch['label']) )
snake_case__ =predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
snake_case__ =accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}: {100 * eval_metric:.2f}""" )
if args.with_tracking:
accelerator.log(
{
'accuracy': 100 * eval_metric,
'train_loss': total_loss.item() / len(UpperCamelCase_ ),
'epoch': epoch,
} , step=UpperCamelCase_ , )
if checkpointing_steps == "epoch":
snake_case__ =f"""epoch_{epoch}"""
if args.output_dir is not None:
snake_case__ =os.path.join(args.output_dir , UpperCamelCase_ )
accelerator.save_state(UpperCamelCase_ )
if args.with_tracking:
accelerator.end_training()
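# A minimal, self-contained sketch (illustrative only; it is not invoked by
# this script, and the helper name is hypothetical) of the core Accelerate
# pattern used above: construct an Accelerator, pass every training object
# through `prepare`, and replace `loss.backward()` with
# `accelerator.backward(loss)`.
def _accelerate_pattern_demo():
    import torch
    from accelerate import Accelerator
    from torch.utils.data import DataLoader, TensorDataset

    accelerator = Accelerator(cpu=True)
    dataset = TensorDataset(torch.randn(32, 4), torch.randn(32, 1))
    dataloader = DataLoader(dataset, batch_size=8)
    model = torch.nn.Linear(4, 1)
    optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)
    model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
    for features, targets in dataloader:
        loss = torch.nn.functional.mse_loss(model(features), targets)
        accelerator.backward(loss)  # replaces the usual loss.backward()
        optimizer.step()
        optimizer.zero_grad()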
def a ( ) -> List[str]:
snake_case__ =argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument('--data_dir' , required=UpperCamelCase_ , help='The data folder on disk.' )
parser.add_argument('--fp16' , action='store_true' , help='If passed, will use FP16 training.' )
parser.add_argument(
'--mixed_precision' , type=UpperCamelCase_ , default=UpperCamelCase_ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
parser.add_argument(
'--checkpointing_steps' , type=UpperCamelCase_ , default=UpperCamelCase_ , help='Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.' , )
parser.add_argument(
'--output_dir' , type=UpperCamelCase_ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--resume_from_checkpoint' , type=UpperCamelCase_ , default=UpperCamelCase_ , help='If the training should continue from a checkpoint folder.' , )
parser.add_argument(
'--with_tracking' , action='store_true' , help='Whether to load in all available experiment trackers from the environment and use them for logging.' , )
parser.add_argument(
        '--project_dir' , type=UpperCamelCase_ , default='logs' , help='Location of where to store experiment tracking logs and relevant project information' , )
snake_case__ =parser.parse_args()
snake_case__ ={'lr': 3e-2, 'num_epochs': 3, 'seed': 42, 'batch_size': 64, 'image_size': 224}
training_function(UpperCamelCase_ , UpperCamelCase_ )
if __name__ == "__main__":
main()
| 581 |
'''simple docstring'''
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
SCREAMING_SNAKE_CASE__ : Any = '''
@article{xu-etal-2016-optimizing,
    title = {Optimizing Statistical Machine Translation for Text Simplification},
    author = {Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
    journal = {Transactions of the Association for Computational Linguistics},
    volume = {4},
    year = {2016},
    url = {https://www.aclweb.org/anthology/Q16-1029},
    pages = {401--415},
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
SCREAMING_SNAKE_CASE__ : List[str] = '''\
WIKI_SPLIT is the combination of three metrics: SARI, EXACT and SACREBLEU.
It can be used to evaluate the quality of machine-generated texts.
'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = '''
Calculates the SARI score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the sacreBLEU score as well as the exact match score.
Args:
sources: list of source sentences where each sentence should be a string.
predictions: list of predicted sentences where each sentence should be a string.
references: list of lists of reference sentences where each sentence should be a string.
Returns:
sari: sari score
sacrebleu: sacrebleu score
exact: exact score
Examples:
>>> sources=["About 95 species are currently accepted ."]
>>> predictions=["About 95 you now get in ."]
>>> references=[["About 95 species are currently known ."]]
>>> wiki_split = datasets.load_metric("wiki_split")
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}
'''
def a ( UpperCamelCase_ : Dict ) -> Union[str, Any]:
def remove_articles(UpperCamelCase_ : List[str] ):
snake_case__ =re.compile(r'\b(a|an|the)\b' , re.UNICODE )
return re.sub(UpperCamelCase_ , ' ' , UpperCamelCase_ )
def white_space_fix(UpperCamelCase_ : List[str] ):
return " ".join(text.split() )
def remove_punc(UpperCamelCase_ : Tuple ):
snake_case__ =set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(UpperCamelCase_ : Tuple ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(UpperCamelCase_ ) ) ) )
def a ( UpperCamelCase_ : List[str] , UpperCamelCase_ : Union[str, Any] ) -> Tuple:
return int(normalize_answer(UpperCamelCase_ ) == normalize_answer(UpperCamelCase_ ) )
def a ( UpperCamelCase_ : List[str] , UpperCamelCase_ : Any ) -> Dict:
snake_case__ =[any(compute_exact(UpperCamelCase_ , UpperCamelCase_ ) for ref in refs ) for pred, refs in zip(UpperCamelCase_ , UpperCamelCase_ )]
return (sum(UpperCamelCase_ ) / len(UpperCamelCase_ )) * 100
def a ( UpperCamelCase_ : List[str] , UpperCamelCase_ : int , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Any ) -> Union[str, Any]:
snake_case__ =[rgram for rgrams in rgramslist for rgram in rgrams]
snake_case__ =Counter(UpperCamelCase_ )
snake_case__ =Counter(UpperCamelCase_ )
snake_case__ =Counter()
for sgram, scount in sgramcounter.items():
snake_case__ =scount * numref
snake_case__ =Counter(UpperCamelCase_ )
snake_case__ =Counter()
for cgram, ccount in cgramcounter.items():
snake_case__ =ccount * numref
# KEEP
snake_case__ =sgramcounter_rep & cgramcounter_rep
snake_case__ =keepgramcounter_rep & rgramcounter
snake_case__ =sgramcounter_rep & rgramcounter
snake_case__ =0
snake_case__ =0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
snake_case__ =1
snake_case__ =1
if len(UpperCamelCase_ ) > 0:
snake_case__ =keeptmpscorea / len(UpperCamelCase_ )
if len(UpperCamelCase_ ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
snake_case__ =keeptmpscorea / sum(keepgramcounterall_rep.values() )
snake_case__ =0
if keepscore_precision > 0 or keepscore_recall > 0:
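        # Combine keep precision and recall with their harmonic mean (an F1 score).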
snake_case__ =2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
snake_case__ =sgramcounter_rep - cgramcounter_rep
snake_case__ =delgramcounter_rep - rgramcounter
snake_case__ =sgramcounter_rep - rgramcounter
snake_case__ =0
snake_case__ =0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
snake_case__ =1
if len(UpperCamelCase_ ) > 0:
snake_case__ =deltmpscorea / len(UpperCamelCase_ )
# ADDITION
snake_case__ =set(UpperCamelCase_ ) - set(UpperCamelCase_ )
snake_case__ =set(UpperCamelCase_ ) & set(UpperCamelCase_ )
snake_case__ =set(UpperCamelCase_ ) - set(UpperCamelCase_ )
snake_case__ =0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
snake_case__ =1
snake_case__ =1
if len(UpperCamelCase_ ) > 0:
snake_case__ =addtmpscore / len(UpperCamelCase_ )
if len(UpperCamelCase_ ) > 0:
snake_case__ =addtmpscore / len(UpperCamelCase_ )
snake_case__ =0
if addscore_precision > 0 or addscore_recall > 0:
snake_case__ =2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def a ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : str ) -> Optional[int]:
snake_case__ =len(UpperCamelCase_ )
snake_case__ =ssent.split(' ' )
snake_case__ =csent.split(' ' )
snake_case__ =[]
snake_case__ =[]
snake_case__ =[]
snake_case__ =[]
snake_case__ =[]
snake_case__ =[]
snake_case__ =[]
snake_case__ =[]
snake_case__ =[]
snake_case__ =[]
for rsent in rsents:
snake_case__ =rsent.split(' ' )
snake_case__ =[]
snake_case__ =[]
snake_case__ =[]
ragramslist.append(UpperCamelCase_ )
for i in range(0 , len(UpperCamelCase_ ) - 1 ):
if i < len(UpperCamelCase_ ) - 1:
snake_case__ =ragrams[i] + ' ' + ragrams[i + 1]
ragrams.append(UpperCamelCase_ )
if i < len(UpperCamelCase_ ) - 2:
snake_case__ =ragrams[i] + ' ' + ragrams[i + 1] + ' ' + ragrams[i + 2]
ragrams.append(UpperCamelCase_ )
if i < len(UpperCamelCase_ ) - 3:
snake_case__ =ragrams[i] + ' ' + ragrams[i + 1] + ' ' + ragrams[i + 2] + ' ' + ragrams[i + 3]
ragrams.append(UpperCamelCase_ )
ragramslist.append(UpperCamelCase_ )
ragramslist.append(UpperCamelCase_ )
ragramslist.append(UpperCamelCase_ )
for i in range(0 , len(UpperCamelCase_ ) - 1 ):
if i < len(UpperCamelCase_ ) - 1:
snake_case__ =sagrams[i] + ' ' + sagrams[i + 1]
sagrams.append(UpperCamelCase_ )
if i < len(UpperCamelCase_ ) - 2:
snake_case__ =sagrams[i] + ' ' + sagrams[i + 1] + ' ' + sagrams[i + 2]
sagrams.append(UpperCamelCase_ )
if i < len(UpperCamelCase_ ) - 3:
snake_case__ =sagrams[i] + ' ' + sagrams[i + 1] + ' ' + sagrams[i + 2] + ' ' + sagrams[i + 3]
sagrams.append(UpperCamelCase_ )
for i in range(0 , len(UpperCamelCase_ ) - 1 ):
if i < len(UpperCamelCase_ ) - 1:
snake_case__ =cagrams[i] + ' ' + cagrams[i + 1]
cagrams.append(UpperCamelCase_ )
if i < len(UpperCamelCase_ ) - 2:
snake_case__ =cagrams[i] + ' ' + cagrams[i + 1] + ' ' + cagrams[i + 2]
cagrams.append(UpperCamelCase_ )
if i < len(UpperCamelCase_ ) - 3:
snake_case__ =cagrams[i] + ' ' + cagrams[i + 1] + ' ' + cagrams[i + 2] + ' ' + cagrams[i + 3]
cagrams.append(UpperCamelCase_ )
((snake_case__) , (snake_case__) , (snake_case__)) =SARIngram(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
((snake_case__) , (snake_case__) , (snake_case__)) =SARIngram(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
((snake_case__) , (snake_case__) , (snake_case__)) =SARIngram(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
((snake_case__) , (snake_case__) , (snake_case__)) =SARIngram(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
snake_case__ =sum([keepascore, keepascore, keepascore, keepascore] ) / 4
snake_case__ =sum([delascore, delascore, delascore, delascore] ) / 4
snake_case__ =sum([addascore, addascore, addascore, addascore] ) / 4
snake_case__ =(avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
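# A worked unigram sketch (illustrative only; the helper name is hypothetical
# and nothing else calls it) of the intuition behind the SARI computation
# above: score what the system output KEPT from, ADDED to, and DELETED from
# the source, judged against the reference.
def _sari_unigram_sketch():
    source = set('about 95 species are currently accepted .'.split() )
    prediction = set('about 95 you now get in .'.split() )
    reference = set('about 95 species are currently known .'.split() )
    good_keep = (source & prediction) & reference  # kept, and the reference keeps it too
    good_add = (prediction - source) & reference  # added, and present in the reference
    good_del = (source - prediction) - reference  # deleted, and the reference drops it too
    return good_keep, good_add, good_del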
def a ( UpperCamelCase_ : Any , UpperCamelCase_ : bool = True , UpperCamelCase_ : str = "13a" , UpperCamelCase_ : bool = True ) -> Dict:
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though the Wiki-Auto and TURK datasets
    # do not require normalization, we do it for consistency.
# Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
# [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
if lowercase:
snake_case__ =sentence.lower()
if tokenizer in ["13a", "intl"]:
if version.parse(sacrebleu.__version__ ).major >= 2:
snake_case__ =sacrebleu.metrics.bleu._get_tokenizer(UpperCamelCase_ )()(UpperCamelCase_ )
else:
snake_case__ =sacrebleu.TOKENIZERS[tokenizer]()(UpperCamelCase_ )
elif tokenizer == "moses":
snake_case__ =sacremoses.MosesTokenizer().tokenize(UpperCamelCase_ , return_str=UpperCamelCase_ , escape=UpperCamelCase_ )
elif tokenizer == "penn":
snake_case__ =sacremoses.MosesTokenizer().penn_tokenize(UpperCamelCase_ , return_str=UpperCamelCase_ )
else:
snake_case__ =sentence
if not return_str:
snake_case__ =normalized_sent.split()
return normalized_sent
def a ( UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : int , UpperCamelCase_ : Any ) -> List[str]:
if not (len(UpperCamelCase_ ) == len(UpperCamelCase_ ) == len(UpperCamelCase_ )):
raise ValueError('Sources length must match predictions and references lengths.' )
snake_case__ =0
for src, pred, refs in zip(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
sari_score += SARIsent(normalize(UpperCamelCase_ ) , normalize(UpperCamelCase_ ) , [normalize(UpperCamelCase_ ) for sent in refs] )
snake_case__ =sari_score / len(UpperCamelCase_ )
return 100 * sari_score
def a ( UpperCamelCase_ : Any , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any]="exp" , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : List[str]=False , UpperCamelCase_ : Optional[int]=False , UpperCamelCase_ : Any=False , ) -> Tuple:
snake_case__ =len(references[0] )
if any(len(UpperCamelCase_ ) != references_per_prediction for refs in references ):
raise ValueError('Sacrebleu requires the same number of references for each prediction' )
snake_case__ =[[refs[i] for refs in references] for i in range(UpperCamelCase_ )]
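    # sacrebleu expects the references transposed: one stream per reference
    # position, each aligned with the full list of predictions.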
snake_case__ =sacrebleu.corpus_bleu(
UpperCamelCase_ , UpperCamelCase_ , smooth_method=UpperCamelCase_ , smooth_value=UpperCamelCase_ , force=UpperCamelCase_ , lowercase=UpperCamelCase_ , use_effective_order=UpperCamelCase_ , )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a__( datasets.Metric ):
def _lowercase ( self ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=[
'https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py',
'https://github.com/cocoxu/simplification/blob/master/SARI.py',
'https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py',
'https://github.com/mjpost/sacreBLEU',
] , reference_urls=[
'https://www.aclweb.org/anthology/Q16-1029.pdf',
'https://github.com/mjpost/sacreBLEU',
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]:
snake_case__ ={}
result.update({'sari': compute_sari(sources=_UpperCAmelCase , predictions=_UpperCAmelCase , references=_UpperCAmelCase )} )
result.update({'sacrebleu': compute_sacrebleu(predictions=_UpperCAmelCase , references=_UpperCAmelCase )} )
result.update({'exact': compute_em(predictions=_UpperCAmelCase , references=_UpperCAmelCase )} )
return result
| 581 | 1 |
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def lowerCamelCase__ ( ) -> Union[str, Any]:
"""simple docstring"""
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
_snake_case : Dict = '__test_patch_submodule_mock__'
with patch_submodule(_test_patching , 'os.path.join' , a__):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj)
assert isinstance(_test_patching.os.path , _PatchedModuleObj)
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj)
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj)
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj)
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj)
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def lowerCamelCase__ ( ) -> int:
"""simple docstring"""
assert _test_patching.open is open
_snake_case : List[str] = '__test_patch_submodule_builtin_mock__'
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , 'open' , a__):
assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
assert _test_patching.open is open
def lowerCamelCase__ ( ) -> List[str]:
"""simple docstring"""
_snake_case : Tuple = '__test_patch_submodule_missing_mock__'
with patch_submodule(_test_patching , 'pandas.read_csv' , a__):
pass
def lowerCamelCase__ ( ) -> str:
"""simple docstring"""
_snake_case : Dict = '__test_patch_submodule_missing_builtin_mock__'
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , 'len' , a__) is None
with patch_submodule(_test_patching , 'len' , a__):
assert _test_patching.len is mock
assert _test_patching.len is len
def lowerCamelCase__ ( ) -> Optional[Any]:
"""simple docstring"""
_snake_case : List[str] = '__test_patch_submodule_start_and_stop_mock__'
_snake_case : List[str] = patch_submodule(_test_patching , 'open' , a__)
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
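    # `patch_submodule` thus works both as a context manager and through the
    # explicit `start()`/`stop()` API, mirroring `unittest.mock.patch`.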
def lowerCamelCase__ ( ) -> List[str]:
"""simple docstring"""
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
_snake_case : Tuple = '__test_patch_submodule_successive_join__'
_snake_case : List[str] = '__test_patch_submodule_successive_dirname__'
_snake_case : Dict = '__test_patch_submodule_successive_rename__'
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , 'os.path.join' , a__):
with patch_submodule(_test_patching , 'os.rename' , a__):
with patch_submodule(_test_patching , 'os.path.dirname' , a__):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , 'os.rename' , a__):
with patch_submodule(_test_patching , 'os.path.join' , a__):
with patch_submodule(_test_patching , 'os.path.dirname' , a__):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def lowerCamelCase__ ( ) -> Optional[Any]:
"""simple docstring"""
_snake_case : List[Any] = '__test_patch_submodule_doesnt_exist_mock__'
with patch_submodule(_test_patching , '__module_that_doesn_exist__.__attribute_that_doesn_exist__' , a__):
pass
with patch_submodule(_test_patching , 'os.__attribute_that_doesn_exist__' , a__):
pass
| 517 |
'''simple docstring'''
from typing import List
from .keymap import KEYMAP, get_character
def lowerCamelCase__ ( a__) -> Union[str, Any]:
"""simple docstring"""
def decorator(a__):
_snake_case : Tuple = getattr(a__ , 'handle_key' , [])
handle += [key]
setattr(a__ , 'handle_key' , a__)
return func
return decorator
def lowerCamelCase__ ( *a__) -> List[str]:
"""simple docstring"""
def decorator(a__):
_snake_case : List[str] = getattr(a__ , 'handle_key' , [])
handle += keys
setattr(a__ , 'handle_key' , a__)
return func
return decorator
class SCREAMING_SNAKE_CASE ( lowercase_ ):
'''simple docstring'''
def __new__( cls : int , snake_case : Union[str, Any] , snake_case : Union[str, Any] , snake_case : Tuple ):
"""simple docstring"""
_snake_case : int = super().__new__(cls , snake_case , snake_case , snake_case )
if not hasattr(snake_case , 'key_handler' ):
setattr(snake_case , 'key_handler' , {} )
setattr(snake_case , 'handle_input' , KeyHandler.handle_input )
for value in attrs.values():
_snake_case : Optional[Any] = getattr(snake_case , 'handle_key' , [] )
for key in handled_keys:
_snake_case : str = value
return new_cls
@staticmethod
def __UpperCAmelCase ( cls : List[Any] ):
"""simple docstring"""
_snake_case : Optional[Any] = get_character()
if char != KEYMAP["undefined"]:
_snake_case : str = ord(snake_case )
_snake_case : str = cls.key_handler.get(snake_case )
if handler:
_snake_case : Optional[int] = char
return handler(cls )
else:
return None
def lowerCamelCase__ ( cls) -> Optional[Any]:
"""simple docstring"""
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy())
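# Hypothetical usage sketch (kept as comments: the decorators above are
# defined under obfuscated names, and `mark` is only their assumed upstream
# name). Methods decorated with a key are collected into `key_handler` by the
# metaclass, and `handle_input` dispatches a key press to the matching method:
#
# class Menu(metaclass=KeyHandler):
#     @mark("up")
#     def scroll_up(cls):
#         ...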
| 517 | 1 |
"""simple docstring"""
__a = {
0: "0",
1: "1",
2: "2",
3: "3",
4: "4",
5: "5",
6: "6",
7: "7",
8: "8",
9: "9",
10: "a",
11: "b",
12: "c",
13: "d",
14: "e",
15: "f",
}
def A_ ( _lowercase ):
    assert type(_lowercase ) in (int, float) and _lowercase == int(_lowercase )
    decimal = int(_lowercase )
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16 )
        hexadecimal = __a[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
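# Illustrative behavior of the converter above:
#   A_(255) -> '0xff'
#   A_(-42) -> '-0x2a'
# Note that A_(0) returns '0x' with no digits, an edge case of the
# while-loop formulation.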
if __name__ == "__main__":
import doctest
doctest.testmod()
| 709 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class lowerCamelCase :
'''simple docstring'''
def __init__( self: Any , snake_case: Dict=2 , snake_case: Union[str, Any]=3 , snake_case: Dict=64 , snake_case: Union[str, Any]=None ) -> Union[str, Any]:
snake_case_ :List[Any] = np.random.default_rng(snake_case )
snake_case_ :Optional[Any] = length
snake_case_ :str = rng.normal(size=(length,) ).astype(np.floataa )
snake_case_ :Optional[int] = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
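        # Targets follow y = a * x + b plus Gaussian noise (scale 0.1), giving
        # a simple linear-regression fixture.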
def __len__( self: Any ) -> Union[str, Any]:
return self.length
def __getitem__( self: Optional[int] , snake_case: Union[str, Any] ) -> Optional[Any]:
return {"x": self.x[i], "y": self.y[i]}
class lowerCamelCase ( torch.nn.Module ):
'''simple docstring'''
def __init__( self: int , snake_case: Optional[Any]=0 , snake_case: Tuple=0 , snake_case: List[Any]=False ) -> Optional[int]:
super().__init__()
snake_case_ :str = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
snake_case_ :Any = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
snake_case_ :Tuple = True
def lowerCAmelCase_ ( self: Optional[Any] , snake_case: Optional[Any]=None ) -> List[str]:
if self.first_batch:
print(f"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
snake_case_ :Union[str, Any] = False
return x * self.a[0] + self.b[0]
class lowerCamelCase ( torch.nn.Module ):
'''simple docstring'''
def __init__( self: str , snake_case: List[Any]=0 , snake_case: Tuple=0 , snake_case: List[str]=False ) -> int:
super().__init__()
snake_case_ :int = torch.nn.Parameter(torch.tensor(snake_case ).float() )
snake_case_ :List[str] = torch.nn.Parameter(torch.tensor(snake_case ).float() )
snake_case_ :List[Any] = True
def lowerCAmelCase_ ( self: Tuple , snake_case: Optional[int]=None ) -> Union[str, Any]:
if self.first_batch:
print(f"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
snake_case_ :List[str] = False
return x * self.a + self.b
def A_ ( _lowercase, _lowercase = 16 ):
'''simple docstring'''
from datasets import load_dataset
from transformers import AutoTokenizer
snake_case_ :Tuple = AutoTokenizer.from_pretrained("""bert-base-cased""" )
snake_case_ :Optional[int] = {"""train""": """tests/test_samples/MRPC/train.csv""", """validation""": """tests/test_samples/MRPC/dev.csv"""}
snake_case_ :Union[str, Any] = load_dataset("""csv""", data_files=_lowercase )
snake_case_ :List[str] = datasets["""train"""].unique("""label""" )
snake_case_ :Any = {v: i for i, v in enumerate(_lowercase )}
def tokenize_function(_lowercase ):
# max_length=None => use the model max length (it's actually the default)
snake_case_ :Dict = tokenizer(
examples["""sentence1"""], examples["""sentence2"""], truncation=_lowercase, max_length=_lowercase, padding="""max_length""" )
if "label" in examples:
snake_case_ :Union[str, Any] = [label_to_id[l] for l in examples["""label"""]]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
snake_case_ :Any = datasets.map(
_lowercase, batched=_lowercase, remove_columns=["""sentence1""", """sentence2""", """label"""], )
def collate_fn(_lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_lowercase, padding="""max_length""", max_length=128, return_tensors="""pt""" )
return tokenizer.pad(_lowercase, padding="""longest""", return_tensors="""pt""" )
# Instantiate dataloaders.
snake_case_ :str = DataLoader(tokenized_datasets["""train"""], shuffle=_lowercase, collate_fn=_lowercase, batch_size=2 )
snake_case_ :Any = DataLoader(tokenized_datasets["""validation"""], shuffle=_lowercase, collate_fn=_lowercase, batch_size=1 )
return train_dataloader, eval_dataloader
| 310 | 0 |
'''simple docstring'''
import pickle
import numpy as np
from matplotlib import pyplot as plt
class UpperCAmelCase__ :
def __init__( self : int,__A : List[str],__A : Tuple,__A : str,__A : str,__A : List[str],__A : int=0.2,__A : List[str]=0.2 ):
_lowerCamelCase : int = bp_numa
_lowerCamelCase : Optional[int] = bp_numa
_lowerCamelCase : Optional[int] = bp_numa
_lowerCamelCase : Union[str, Any] = conva_get[:2]
_lowerCamelCase : Any = conva_get[2]
_lowerCamelCase : int = size_pa
_lowerCamelCase : Any = rate_w
_lowerCamelCase : Any = rate_t
_lowerCamelCase : str = [
np.mat(-1 * np.random.rand(self.conva[0],self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
_lowerCamelCase : List[Any] = np.mat(-1 * np.random.rand(self.num_bpa,self.num_bpa ) + 0.5 )
_lowerCamelCase : Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa,self.num_bpa ) + 0.5 )
_lowerCamelCase : Any = -2 * np.random.rand(self.conva[1] ) + 1
_lowerCamelCase : Tuple = -2 * np.random.rand(self.num_bpa ) + 1
_lowerCamelCase : List[str] = -2 * np.random.rand(self.num_bpa ) + 1
def lowerCamelCase_ ( self : Tuple,__A : int ):
# save model dict with pickle
_lowerCamelCase : Any = {
"num_bp1": self.num_bpa,
"num_bp2": self.num_bpa,
"num_bp3": self.num_bpa,
"conv1": self.conva,
"step_conv1": self.step_conva,
"size_pooling1": self.size_poolinga,
"rate_weight": self.rate_weight,
"rate_thre": self.rate_thre,
"w_conv1": self.w_conva,
"wkj": self.wkj,
"vji": self.vji,
"thre_conv1": self.thre_conva,
"thre_bp2": self.thre_bpa,
"thre_bp3": self.thre_bpa,
}
with open(__A,"wb" ) as f:
pickle.dump(__A,__A )
print(f'Model saved: {save_path}' )
@classmethod
def lowerCamelCase_ ( cls : Any,__A : Dict ):
# read saved model
with open(__A,"rb" ) as f:
_lowerCamelCase : List[str] = pickle.load(__A ) # noqa: S301
_lowerCamelCase : Tuple = model_dic.get("conv1" )
conv_get.append(model_dic.get("step_conv1" ) )
_lowerCamelCase : List[str] = model_dic.get("size_pooling1" )
_lowerCamelCase : Dict = model_dic.get("num_bp1" )
_lowerCamelCase : List[str] = model_dic.get("num_bp2" )
_lowerCamelCase : Optional[Any] = model_dic.get("num_bp3" )
_lowerCamelCase : str = model_dic.get("rate_weight" )
_lowerCamelCase : Any = model_dic.get("rate_thre" )
# create model instance
_lowerCamelCase : Union[str, Any] = CNN(__A,__A,__A,__A,__A,__A,__A )
# modify model parameter
_lowerCamelCase : Dict = model_dic.get("w_conv1" )
_lowerCamelCase : Optional[int] = model_dic.get("wkj" )
_lowerCamelCase : Optional[Any] = model_dic.get("vji" )
_lowerCamelCase : Dict = model_dic.get("thre_conv1" )
_lowerCamelCase : Tuple = model_dic.get("thre_bp2" )
_lowerCamelCase : Optional[int] = model_dic.get("thre_bp3" )
return conv_ins
def lowerCamelCase_ ( self : Optional[Any],__A : Optional[Any] ):
return 1 / (1 + np.exp(-1 * x ))
def lowerCamelCase_ ( self : Dict,__A : str ):
return round(__A,3 )
def lowerCamelCase_ ( self : str,__A : int,__A : Any,__A : Union[str, Any],__A : Dict,__A : List[Any] ):
# convolution process
_lowerCamelCase : Optional[Any] = convs[0]
_lowerCamelCase : List[Any] = convs[1]
_lowerCamelCase : int = np.shape(__A )[0]
# get the data slice of original image data, data_focus
_lowerCamelCase : Tuple = []
for i_focus in range(0,size_data - size_conv + 1,__A ):
for j_focus in range(0,size_data - size_conv + 1,__A ):
_lowerCamelCase : List[Any] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(__A )
# calculate the feature map of every single kernel, and saved as list of matrix
_lowerCamelCase : Dict = []
_lowerCamelCase : Dict = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(__A ):
_lowerCamelCase : Optional[int] = []
for i_focus in range(len(__A ) ):
_lowerCamelCase : List[Any] = (
np.sum(np.multiply(data_focus[i_focus],w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(__A ) )
_lowerCamelCase : Union[str, Any] = np.asmatrix(__A ).reshape(
__A,__A )
data_featuremap.append(__A )
        # expand the data slices into one dimension
_lowerCamelCase : int = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(__A ) )
_lowerCamelCase : Dict = np.asarray(__A )
return focus_list, data_featuremap
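    # The convolution above slides a size_conv x size_conv window with stride
    # conv_step over the input, applies each kernel with its threshold (bias),
    # squashes the result through the sigmoid, and reshapes everything into
    # square feature maps of side (size_data - size_conv) / conv_step + 1.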
def lowerCamelCase_ ( self : Any,__A : Optional[Any],__A : Optional[Any],__A : int="average_pool" ):
# pooling process
_lowerCamelCase : Tuple = len(featuremaps[0] )
_lowerCamelCase : Tuple = int(size_map / size_pooling )
_lowerCamelCase : int = []
for i_map in range(len(__A ) ):
_lowerCamelCase : Optional[Any] = featuremaps[i_map]
_lowerCamelCase : int = []
for i_focus in range(0,__A,__A ):
for j_focus in range(0,__A,__A ):
_lowerCamelCase : int = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(__A ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(__A ) )
_lowerCamelCase : Optional[Any] = np.asmatrix(__A ).reshape(__A,__A )
featuremap_pooled.append(__A )
return featuremap_pooled
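    # Pooling shrinks each feature map by a factor of size_pooling per side,
    # summarizing every window by its average ("average_pool") or its maximum
    # ("max_pooling").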
def lowerCamelCase_ ( self : Optional[Any],__A : List[str] ):
        # expand three-dimensional data into a one-dimensional list
_lowerCamelCase : Union[str, Any] = []
for i in range(len(__A ) ):
_lowerCamelCase : int = np.shape(data[i] )
_lowerCamelCase : List[str] = data[i].reshape(1,shapes[0] * shapes[1] )
_lowerCamelCase : List[str] = data_listed.getA().tolist()[0]
data_expanded.extend(__A )
_lowerCamelCase : Tuple = np.asarray(__A )
return data_expanded
def lowerCamelCase_ ( self : Tuple,__A : Optional[int] ):
        # expand a matrix into a one-dimensional list
_lowerCamelCase : int = np.asarray(__A )
_lowerCamelCase : List[str] = np.shape(__A )
_lowerCamelCase : Tuple = data_mat.reshape(1,shapes[0] * shapes[1] )
return data_expanded
def lowerCamelCase_ ( self : List[str],__A : Any,__A : List[str],__A : List[Any],__A : Any,__A : Tuple ):
_lowerCamelCase : Tuple = []
_lowerCamelCase : List[Any] = 0
for i_map in range(__A ):
_lowerCamelCase : List[Any] = np.ones((size_map, size_map) )
for i in range(0,__A,__A ):
for j in range(0,__A,__A ):
_lowerCamelCase : int = pd_pool[
i_pool
]
_lowerCamelCase : Dict = i_pool + 1
_lowerCamelCase : Any = np.multiply(
__A,np.multiply(out_map[i_map],(1 - out_map[i_map]) ) )
pd_all.append(__A )
return pd_all
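    # The pooled gradient is fanned back out to feature-map resolution (each
    # pooled cell covers its size_pooling x size_pooling window) and multiplied
    # by the sigmoid derivative out * (1 - out) of the forward feature maps.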
def lowerCamelCase_ ( self : Union[str, Any],__A : Dict,__A : Optional[Any],__A : Union[str, Any],__A : Optional[Any],__A : Union[str, Any],__A : Tuple=bool ):
        # model training
print("----------------------Start Training-------------------------" )
print((" - - Shape: Train_Data ", np.shape(__A )) )
print((" - - Shape: Teach_Data ", np.shape(__A )) )
_lowerCamelCase : Tuple = 0
_lowerCamelCase : Optional[Any] = []
_lowerCamelCase : List[str] = 1_0_0_0_0
while rp < n_repeat and mse >= error_accuracy:
_lowerCamelCase : List[Any] = 0
print(f'-------------Learning Time {rp}--------------' )
for p in range(len(__A ) ):
# print('------------Learning Image: %d--------------'%p)
_lowerCamelCase : List[str] = np.asmatrix(datas_train[p] )
_lowerCamelCase : Dict = np.asarray(datas_teach[p] )
_lowerCamelCase , _lowerCamelCase : int = self.convolute(
__A,self.conva,self.w_conva,self.thre_conva,conv_step=self.step_conva,)
_lowerCamelCase : int = self.pooling(__A,self.size_poolinga )
_lowerCamelCase : List[str] = np.shape(__A )
_lowerCamelCase : Optional[Any] = self._expand(__A )
_lowerCamelCase : List[str] = data_bp_input
_lowerCamelCase : Union[str, Any] = np.dot(__A,self.vji.T ) - self.thre_bpa
_lowerCamelCase : Optional[int] = self.sig(__A )
_lowerCamelCase : Union[str, Any] = np.dot(__A,self.wkj.T ) - self.thre_bpa
_lowerCamelCase : List[Any] = self.sig(__A )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
_lowerCamelCase : Tuple = np.multiply(
(data_teach - bp_outa),np.multiply(__A,(1 - bp_outa) ) )
_lowerCamelCase : List[str] = np.multiply(
np.dot(__A,self.wkj ),np.multiply(__A,(1 - bp_outa) ) )
_lowerCamelCase : List[Any] = np.dot(__A,self.vji )
_lowerCamelCase : Optional[int] = pd_i_all / (self.size_poolinga * self.size_poolinga)
_lowerCamelCase : int = pd_conva_pooled.T.getA().tolist()
_lowerCamelCase : Optional[int] = self._calculate_gradient_from_pool(
__A,__A,shape_featuremapa[0],shape_featuremapa[1],self.size_poolinga,)
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
_lowerCamelCase : str = self._expand_mat(pd_conva_all[k_conv] )
_lowerCamelCase : Optional[int] = self.rate_weight * np.dot(__A,__A )
_lowerCamelCase : Tuple = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
_lowerCamelCase : Optional[Any] = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
_lowerCamelCase : List[str] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
_lowerCamelCase : Dict = self.vji + pd_j_all.T * bp_outa * self.rate_weight
_lowerCamelCase : List[Any] = self.thre_bpa - pd_k_all * self.rate_thre
_lowerCamelCase : Optional[int] = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
_lowerCamelCase : List[str] = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
_lowerCamelCase : List[Any] = rp + 1
_lowerCamelCase : str = error_count / patterns
all_mse.append(__A )
def draw_error():
_lowerCamelCase : List[str] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(__A,"+-" )
plt.plot(__A,"r--" )
plt.xlabel("Learning Times" )
plt.ylabel("All_mse" )
plt.grid(__A,alpha=0.5 )
plt.show()
print("------------------Training Complished---------------------" )
print((" - - Training epoch: ", rp, f' - - Mse: {mse:.6f}') )
if draw_e:
draw_error()
return mse
def lowerCamelCase_ ( self : int,__A : List[Any] ):
# model predict
_lowerCamelCase : Any = []
print("-------------------Start Testing-------------------------" )
print((" - - Shape: Test_Data ", np.shape(__A )) )
for p in range(len(__A ) ):
_lowerCamelCase : Optional[int] = np.asmatrix(datas_test[p] )
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.convolute(
__A,self.conva,self.w_conva,self.thre_conva,conv_step=self.step_conva,)
_lowerCamelCase : Optional[int] = self.pooling(__A,self.size_poolinga )
_lowerCamelCase : int = self._expand(__A )
_lowerCamelCase : Any = data_bp_input
_lowerCamelCase : Union[str, Any] = bp_outa * self.vji.T - self.thre_bpa
_lowerCamelCase : Tuple = self.sig(__A )
_lowerCamelCase : Union[str, Any] = bp_outa * self.wkj.T - self.thre_bpa
_lowerCamelCase : str = self.sig(__A )
produce_out.extend(bp_outa.getA().tolist() )
_lowerCamelCase : Union[str, Any] = [list(map(self.do_round,__A ) ) for each in produce_out]
return np.asarray(__A )
def lowerCamelCase_ ( self : Any,__A : str ):
        # return the image data after the convolution process so we can inspect it
_lowerCamelCase : Any = np.asmatrix(__A )
_lowerCamelCase , _lowerCamelCase : Optional[int] = self.convolute(
__A,self.conva,self.w_conva,self.thre_conva,conv_step=self.step_conva,)
_lowerCamelCase : Optional[int] = self.pooling(__A,self.size_poolinga )
return data_conveda, data_pooleda
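# Quick numerical check (illustrative; the helper name is hypothetical and
# nothing calls it) of the identity the backward pass above relies on:
# d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x)).
def _sigmoid_derivative_check():
    x = np.linspace(-3, 3, 7)
    s = 1 / (1 + np.exp(-x))
    # forward difference approximation of the derivative
    numeric = (1 / (1 + np.exp(-(x + 1e-6))) - s) / 1e-6
    assert np.allclose(numeric, s * (1 - s), atol=1e-4)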
if __name__ == "__main__":
    pass
| 44 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __A ( A , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = CTRLTokenizer
__lowerCamelCase : Union[str, Any] = False
__lowerCamelCase : Any = False
def a__ (self ) -> Optional[int]:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
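        # The "@@" suffix in the vocab marks a subword that continues into the
        # next token; each line of the merges file below names one BPE merge
        # (e.g. "a p" fuses "a" and "p" into "ap").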
_a = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
_a = dict(zip(A , range(len(A ) ) ) )
_a = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
_a = {'''unk_token''': '''<unk>'''}
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(A ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(A ) )
def a__ (self , **A ) -> int:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **A )
def a__ (self , A ) -> Tuple:
"""simple docstring"""
_a = '''adapt react readapt apt'''
_a = '''adapt react readapt apt'''
return input_text, output_text
def a__ (self ) -> List[Any]:
"""simple docstring"""
_a = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_a = '''adapt react readapt apt'''
_a = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
_a = tokenizer.tokenize(A )
self.assertListEqual(A , A )
_a = tokens + [tokenizer.unk_token]
_a = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , A )
| 11 | 0 |
# flake8: noqa
# Lint as: python3
lowerCamelCase__ = [
"""VerificationMode""",
"""Version""",
"""disable_progress_bar""",
"""enable_progress_bar""",
"""is_progress_bar_enabled""",
"""experimental""",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 547 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowerCamelCase__ = {"""configuration_speech_encoder_decoder""": ["""SpeechEncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""SpeechEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""FlaxSpeechEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 547 | 1 |
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class __lowercase ( lowerCamelCase__ ):
def _lowercase ( self : Any ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__UpperCamelCase , """hidden_sizes""" ) )
self.parent.assertTrue(hasattr(__UpperCamelCase , """num_attention_heads""" ) )
self.parent.assertTrue(hasattr(__UpperCamelCase , """num_encoder_blocks""" ) )
class __lowercase :
def __init__( self : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[str]=1_3 , __lowerCamelCase : str=6_4 , __lowerCamelCase : int=3 , __lowerCamelCase : Dict=4 , __lowerCamelCase : Any=[2, 2, 2, 2] , __lowerCamelCase : List[Any]=[8, 4, 2, 1] , __lowerCamelCase : Any=[1_6, 3_2, 6_4, 1_2_8] , __lowerCamelCase : int=[1, 4, 8, 1_6] , __lowerCamelCase : List[str]=[1, 2, 4, 8] , __lowerCamelCase : int=True , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : str="gelu" , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : str=0.02 , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : Tuple=None , ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = image_size
UpperCAmelCase = num_channels
UpperCAmelCase = num_encoder_blocks
UpperCAmelCase = sr_ratios
UpperCAmelCase = depths
UpperCAmelCase = hidden_sizes
UpperCAmelCase = downsampling_rates
UpperCAmelCase = num_attention_heads
UpperCAmelCase = is_training
UpperCAmelCase = use_labels
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = initializer_range
UpperCAmelCase = num_labels
UpperCAmelCase = scope
def _lowercase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def _lowercase ( self : int ) -> Dict:
"""simple docstring"""
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def _lowercase ( self : List[Any] , __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : str ) -> Dict:
"""simple docstring"""
UpperCAmelCase = SegformerModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase = model(__UpperCamelCase )
UpperCAmelCase = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def _lowercase ( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : Union[str, Any] ) -> Dict:
"""simple docstring"""
UpperCAmelCase = self.num_labels
UpperCAmelCase = SegformerForSemanticSegmentation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
UpperCAmelCase = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def _lowercase ( self : Dict , __lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[int] ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = 1
UpperCAmelCase = SegformerForSemanticSegmentation(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(__UpperCamelCase )
UpperCAmelCase = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertGreater(result.loss , 0.0 )
def _lowercase ( self : Any ) -> Any:
"""simple docstring"""
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase = config_and_inputs
UpperCAmelCase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __lowercase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
UpperCamelCase = (
{
'feature-extraction': SegformerModel,
'image-classification': SegformerForImageClassification,
'image-segmentation': SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCamelCase = True
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def _lowercase ( self : Dict ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = SegformerModelTester(self )
UpperCAmelCase = SegformerConfigTester(self , config_class=__UpperCamelCase )
def _lowercase ( self : int ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowercase ( self : str ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def _lowercase ( self : Any ) -> List[str]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*__UpperCamelCase )
def _lowercase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*__UpperCamelCase )
@unittest.skip("""SegFormer does not use inputs_embeds""" )
def _lowercase ( self : str ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip("""SegFormer does not have get_input_embeddings method and get_output_embeddings methods""" )
def _lowercase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
pass
def _lowercase ( self : Dict ) -> str:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(__UpperCamelCase )
UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def _lowercase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = True
for model_class in self.all_model_classes:
UpperCAmelCase = True
UpperCAmelCase = False
UpperCAmelCase = True
UpperCAmelCase = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
UpperCAmelCase = outputs.attentions
UpperCAmelCase = sum(self.model_tester.depths )
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCAmelCase = True
UpperCAmelCase = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
UpperCAmelCase = outputs.attentions
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
# verify the first attentions (first block, first layer)
UpperCAmelCase = (self.model_tester.image_size // 4) ** 2
UpperCAmelCase = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
UpperCAmelCase = (self.model_tester.image_size // 3_2) ** 2
UpperCAmelCase = (self.model_tester.image_size // (3_2 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
UpperCAmelCase = len(__UpperCamelCase )
# Check attention is always last and order is fine
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(out_len + 1 , len(__UpperCamelCase ) )
UpperCAmelCase = outputs.attentions
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
# verify the first attentions (first block, first layer)
UpperCAmelCase = (self.model_tester.image_size // 4) ** 2
UpperCAmelCase = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
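        # SegFormer's efficient self-attention shrinks keys/values spatially by
        # sr_ratio per side, so the attended sequence length is reduced by a
        # factor of sr_ratio ** 2 relative to the query sequence.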
def _lowercase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
def check_hidden_states_output(__lowerCamelCase : int , __lowerCamelCase : Dict , __lowerCamelCase : List[Any] ):
UpperCAmelCase = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
UpperCAmelCase = outputs.hidden_states
UpperCAmelCase = self.model_tester.num_encoder_blocks
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = True
check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase = True
check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def _lowercase ( self : int ) -> Optional[int]:
"""simple docstring"""
if not self.model_tester.is_training:
return
UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = True
for model_class in self.all_model_classes:
if model_class in get_values(__UpperCamelCase ):
continue
UpperCAmelCase = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.train()
UpperCAmelCase = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
UpperCAmelCase = model(**__UpperCamelCase ).loss
loss.backward()
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _lowercase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
pass
@slow
def test_model_from_pretrained(self) -> None:
    for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        model = SegformerModel.from_pretrained(model_name)
        self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self) -> None:
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_segmentation_city(self) -> None:
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))

    @slow
    def test_post_processing_semantic_segmentation(self) -> None:
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 377 |
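# A quick numeric sketch of the attention shapes asserted in the tests above:
# SegFormer's efficient self-attention keeps queries at full resolution but
# downsamples keys/values by each block's spatial-reduction ratio (sr_ratio).
# The values here (a 64x64 input, sr_ratio=8) are illustrative assumptions:
image_size, sr_ratio = 64, 8
seq_len = (image_size // 4) ** 2  # 256 query positions after the 4x patch embedding
reduced_seq_len = (image_size // (4 * sr_ratio)) ** 2  # only 4 key/value positions
assert (seq_len, reduced_seq_len) == (256, 4)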
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
    "self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
    "self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "ctc_proj",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "ctc_proj",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()
    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()
    hf_wavlm = WavLMModel(config)
    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path) | 327 | 0 |
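# A minimal usage sketch for the conversion script above. The script filename
# and the checkpoint path are hypothetical placeholders, not files that ship
# with the code:
#
#   python convert_wavlm_checkpoint.py \
#       --checkpoint_path /path/to/WavLM-Base.pt \
#       --pytorch_dump_folder_path ./wavlm-converted
#
# Once the script has run, the dump folder loads like any hub model:
#
#   from transformers import WavLMModel
#   model = WavLMModel.from_pretrained("./wavlm-converted")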
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]
    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}
    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )
    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 253 |
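# Why make_linear_from_emb above works: fairseq XGLM ties the output projection
# to the token embedding, so lm_head is rebuilt as a bias-free Linear sharing
# the embedding weights. A tiny self-contained sketch of the same trick
# (sizes here are arbitrary illustration values):
import torch
from torch import nn

emb = nn.Embedding(10, 4)
lm_head = nn.Linear(4, 10, bias=False)  # in=embedding dim, out=vocab size
lm_head.weight.data = emb.weight.data   # each weight row is one token's embedding
logits = lm_head(emb(torch.tensor([3])))  # shape (1, 10)
# a token's logit against itself is the squared norm of its embedding
assert torch.allclose(logits[0, 3], emb.weight[3] @ emb.weight[3])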
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_opt"] = [
        "OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OPTForCausalLM",
        "OPTModel",
        "OPTPreTrainedModel",
        "OPTForSequenceClassification",
        "OPTForQuestionAnswering",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_opt"] = [
        "FlaxOPTForCausalLM",
        "FlaxOPTModel",
        "FlaxOPTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 253 | 1 |
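# The _LazyModule pattern above defers the heavy torch/tf/flax imports until a
# symbol is actually requested. A minimal standalone sketch of the same idea
# (class and attribute names here are illustrative, not the transformers API):
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._class_to_module = {cls: mod for mod, classes in import_structure.items() for cls in classes}

    def __getattr__(self, name):
        # the submodule import happens only on first attribute access
        module = importlib.import_module("." + self._class_to_module[name], self.__name__)
        return getattr(module, name)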
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)


class ByT5Tokenizer(PreTrainedTokenizer):
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=125,
        additional_special_tokens=None,
        **kwargs,
    ) -> None:
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens"
                )
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # utf is 8 bits
        # define special tokens dict
        self.special_tokens_encoder: Dict[str, int] = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        n = len(additional_special_tokens)
        for i, token in enumerate(additional_special_tokens):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder: Dict[int, str] = {v: k for k, v in self.special_tokens_encoder.items()}

    @property
    def vocab_size(self):
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        tokens = [chr(i) for i in text.encode("utf-8")]
        return tokens

    def _convert_token_to_id(self, token):
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token) + self._num_special_tokens
        return token_id

    def _convert_id_to_token(self, index):
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        bstring = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8")
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode("utf-8", errors="ignore")
        return string

    # ByT5Tokenizer has no vocab file
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        return ()
| 3 |
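# A small round-trip sketch of the byte-level scheme above: every UTF-8 byte is
# one token, shifted by the 3 leading special tokens (pad=0, eos=1, unk=2), so
# id(byte b) == b + 3. This snippet needs no tokenizer object at all:
text = "héllo"
byte_tokens = [chr(b) for b in text.encode("utf-8")]      # what _tokenize produces
ids = [ord(t) + 3 for t in byte_tokens]                   # what _convert_token_to_id produces
assert bytes(i - 3 for i in ids).decode("utf-8") == text  # decoding inverts the shift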
from __future__ import annotations
def pigeon_sort(array: list[int]) -> list[int]:
    """
    Implementation of pigeonhole sort.

    >>> pigeon_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> pigeon_sort([])
    []
    >>> pigeon_sort([-2, -5, -45])
    [-45, -5, -2]
    """
    if len(array) == 0:
        return array
    _min, _max = min(array), max(array)
    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range
    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1
    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1
    # Returns the sorted array.
    return array


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
print(pigeon_sort(unsorted))
| 643 | 0 |
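# Pigeonhole sort is O(n + range) time but O(range) extra space, so it only
# pays off when max(array) - min(array) stays small relative to len(array).
# A quick demonstration of that trade-off with a deliberately wide range:
wide = [0, 10_000, 5]  # range 10001 forces two 10001-slot helper lists
assert pigeon_sort(wide) == [0, 5, 10_000]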
def solution(limit: int = 50000000) -> int:
    """
    Return how many integers below `limit` can be written as the sum of a
    prime square, a prime cube and a prime fourth power (Project Euler 87).
    """
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))
    for prime_1 in primes:
        square = prime_1 * prime_1
        for prime_2 in primes:
            cube = prime_2 * prime_2 * prime_2
            if square + cube >= limit - 16:
                break
            for prime_3 in primes:
                tetr = prime_3 * prime_3 * prime_3 * prime_3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)
    return len(ret)
if __name__ == "__main__":
print(f"""{solution() = }""")
| 29 |
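# Sanity check from the problem statement: below fifty exactly four numbers
# (28, 33, 47, 49) have such a representation. Note that the early `break`s
# above implicitly rely on small-integer sets iterating in ascending order,
# which holds in CPython:
assert solution(50) == 4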
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
        "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FunnelBaseModel",
        "FunnelForMaskedLM",
        "FunnelForMultipleChoice",
        "FunnelForPreTraining",
        "FunnelForQuestionAnswering",
        "FunnelForSequenceClassification",
        "FunnelForTokenClassification",
        "FunnelModel",
        "FunnelPreTrainedModel",
        "load_tf_weights_in_funnel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
        "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFFunnelBaseModel",
        "TFFunnelForMaskedLM",
        "TFFunnelForMultipleChoice",
        "TFFunnelForPreTraining",
        "TFFunnelForQuestionAnswering",
        "TFFunnelForSequenceClassification",
        "TFFunnelForTokenClassification",
        "TFFunnelModel",
        "TFFunnelPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 29 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_ctx=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 682 |
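# The attribute_map above lets GPT-style "n_*" names double as the canonical
# transformers names; PretrainedConfig resolves the alias on attribute access.
# A quick sketch of that behaviour, assuming the config class above is in scope:
config = CodeGenConfig(n_embd=1024, n_head=8)
assert config.hidden_size == 1024          # alias for n_embd
assert config.num_attention_heads == 8     # alias for n_head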
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
_UpperCAmelCase : Tuple = "true"
def lowerCAmelCase_ (lowercase__ : int , lowercase__ : int=82 , lowercase__ : str=16 ) -> Tuple:
'''simple docstring'''
set_seed(42 )
lowerCAmelCase__ = RegressionModel()
lowerCAmelCase__ = deepcopy(lowercase__ )
lowerCAmelCase__ = RegressionDataset(length=lowercase__ )
lowerCAmelCase__ = DataLoader(lowercase__ , batch_size=lowercase__ )
model.to(accelerator.device )
lowerCAmelCase__ , lowerCAmelCase__ = accelerator.prepare(lowercase__ , lowercase__ )
return model, ddp_model, dataloader
def lowerCAmelCase_ (lowercase__ : Accelerator , lowercase__ : Optional[Any]=False ) -> int:
'''simple docstring'''
lowerCAmelCase__ = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''' )
lowerCAmelCase__ = load_dataset('''glue''' , '''mrpc''' , split='''validation''' )
def tokenize_function(lowercase__ : Any ):
lowerCAmelCase__ = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=lowercase__ , max_length=lowercase__ )
return outputs
with accelerator.main_process_first():
lowerCAmelCase__ = dataset.map(
lowercase__ , batched=lowercase__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
lowerCAmelCase__ = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(lowercase__ : Any ):
if use_longest:
return tokenizer.pad(lowercase__ , padding='''longest''' , return_tensors='''pt''' )
return tokenizer.pad(lowercase__ , padding='''max_length''' , max_length=1_28 , return_tensors='''pt''' )
return DataLoader(lowercase__ , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=16 )
def lowerCAmelCase_ (lowercase__ : Tuple , lowercase__ : Dict ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase__ = Accelerator(dispatch_batches=lowercase__ , split_batches=lowercase__ )
lowerCAmelCase__ = get_dataloader(lowercase__ , not dispatch_batches )
lowerCAmelCase__ = AutoModelForSequenceClassification.from_pretrained(
'''hf-internal-testing/mrpc-bert-base-cased''' , return_dict=lowercase__ )
lowerCAmelCase__ , lowerCAmelCase__ = accelerator.prepare(lowercase__ , lowercase__ )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def lowerCAmelCase_ (lowercase__ : List[str] , lowercase__ : List[str] , lowercase__ : Tuple ) -> int:
'''simple docstring'''
lowerCAmelCase__ = []
for batch in dataloader:
lowerCAmelCase__ , lowerCAmelCase__ = batch.values()
with torch.no_grad():
lowerCAmelCase__ = model(lowercase__ )
lowerCAmelCase__ , lowerCAmelCase__ = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
lowerCAmelCase__ , lowerCAmelCase__ = [], []
for logit, targ in logits_and_targets:
logits.append(lowercase__ )
targs.append(lowercase__ )
lowerCAmelCase__ , lowerCAmelCase__ = torch.cat(lowercase__ ), torch.cat(lowercase__ )
return logits, targs
def lowerCAmelCase_ (lowercase__ : Accelerator , lowercase__ : Optional[Any]=82 , lowercase__ : List[Any]=False , lowercase__ : Optional[int]=False , lowercase__ : Union[str, Any]=16 ) -> int:
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = get_basic_setup(lowercase__ , lowercase__ , lowercase__ )
lowerCAmelCase__ , lowerCAmelCase__ = generate_predictions(lowercase__ , lowercase__ , lowercase__ )
assert (
len(lowercase__ ) == num_samples
), f'Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(lowercase__ )}'
def lowerCAmelCase_ (lowercase__ : bool = False , lowercase__ : bool = False ) -> int:
'''simple docstring'''
lowerCAmelCase__ = evaluate.load('''glue''' , '''mrpc''' )
lowerCAmelCase__ , lowerCAmelCase__ = get_mrpc_setup(lowercase__ , lowercase__ )
# First do baseline
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = setup['''no''']
model.to(lowercase__ )
model.eval()
for batch in dataloader:
batch.to(lowercase__ )
with torch.inference_mode():
lowerCAmelCase__ = model(**lowercase__ )
lowerCAmelCase__ = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=lowercase__ , references=batch['''labels'''] )
lowerCAmelCase__ = metric.compute()
# Then do distributed
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = setup['''ddp''']
model.eval()
for batch in dataloader:
with torch.inference_mode():
lowerCAmelCase__ = model(**lowercase__ )
lowerCAmelCase__ = outputs.logits.argmax(dim=-1 )
lowerCAmelCase__ = batch['''labels''']
lowerCAmelCase__ , lowerCAmelCase__ = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=lowercase__ , references=lowercase__ )
lowerCAmelCase__ = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), f'Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'
def lowerCAmelCase_ () -> Tuple:
'''simple docstring'''
lowerCAmelCase__ = Accelerator(split_batches=lowercase__ , dispatch_batches=lowercase__ )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('''**Testing gather_for_metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`' )
test_mrpc(lowercase__ , lowercase__ )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test torch metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
lowerCAmelCase__ = Accelerator(split_batches=lowercase__ , dispatch_batches=lowercase__ )
if accelerator.is_local_main_process:
print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99' )
test_torch_metrics(lowercase__ , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test last batch is not dropped when perfectly divisible**''' )
lowerCAmelCase__ = Accelerator()
test_torch_metrics(lowercase__ , 5_12 )
accelerator.state._reset_state()
def lowerCAmelCase_ (lowercase__ : Optional[int] ) -> List[str]:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 668 | 0 |
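# The core idea exercised above: gather_for_metrics collects per-process
# tensors and drops the duplicated samples that distributed samplers pad onto
# the last batch, so metric inputs match the dataset length exactly. A minimal
# usage sketch (single-process here, so gathering is effectively a no-op):
import torch
from accelerate import Accelerator

accelerator = Accelerator()
dataloader = accelerator.prepare(torch.utils.data.DataLoader(range(10), batch_size=4))
seen = []
for batch in dataloader:
    seen.append(accelerator.gather_for_metrics(batch))
assert torch.cat(seen).shape[0] == 10  # no duplicated tail samples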
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)
        params = replicate(params)
        prompt_ids = shard(prompt_ids)
        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params
        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)
        params = replicate(params)
        prompt_ids = shard(prompt_ids)
        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 708 |
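# What replicate/shard do in the pipeline tests above: parameters are copied to
# every device, while the batch gains a leading device axis so pmap can split
# it. A small shape-only sketch with plain arrays (runs on however many devices
# jax can see):
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

n = jax.device_count()
params = {"w": jnp.ones((3,))}
batch = jnp.zeros((n * 2, 5))  # per-device batch of 2
assert replicate(params)["w"].shape == (n, 3)
assert shard(batch).shape == (n, 2, 5)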
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b


def mocked_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
| 291 | 0 |
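# Why the collate_fn above pads to a fixed max_length on TPU: XLA compiles one
# program per input shape, so "longest" padding would recompile on nearly every
# batch. A shape-only sketch of the difference (plain lists, no tokenizer):
lengths = [5, 7, 3]
longest = [[1] * n + [0] * (max(lengths) - n) for n in lengths]  # width changes batch to batch
fixed = [[1] * n + [0] * (128 - n) for n in lengths]  # always width 128 -> one compiled program
assert {len(row) for row in fixed} == {128}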
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b101100111110110010010000011110111011000110011110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]


class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images
        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()
        images = [self.encoder.encode(image, "dwtDct") for image in images]
        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)
        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
| 282 |
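# Round-trip sketch for the invisible watermark above. WatermarkDecoder comes
# from the same imwatermark package and mirrors the 'bits'/'dwtDct' encoder
# setup; treat the exact decoder API as an assumption of this sketch:
import numpy as np
from imwatermark import WatermarkDecoder

decoder = WatermarkDecoder("bits", len(WATERMARK_BITS))
bgr = np.zeros((256, 256, 3), dtype=np.uint8)  # stand-in for a watermarked frame
recovered = decoder.decode(bgr, "dwtDct")      # returns the embedded bit array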
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """
    Compute the first `precision` digits of pi with the Chudnovsky algorithm.
    https://en.wikipedia.org/wiki/Chudnovsky_algorithm

    >>> pi(10)
    '3.14159265'
    """
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")
    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]
if __name__ == "__main__":
    n = 50
print(F"The first {n} digits of pi is: {pi(n)}")
| 282 | 1 |
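# The series implemented above is the Chudnovsky formula
#   1/pi = 12 * sum_{k>=0} (-1)^k (6k)! (13591409 + 545140134 k)
#                        / ((3k)! (k!)^3 640320^(3k + 3/2))
# where 426880*sqrt(10005) = 640320^(3/2)/12 and -262537412640768000 = -640320^3.
# Each term adds roughly 14 digits, hence ceil(precision / 14) iterations.
# Quick check against the leading digits of pi:
assert pi(10) == "3.14159265"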
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}


class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
| 60 |
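# Quick consistency check of the defaults above: Falcon-7B's 4544-dim hidden
# state split across 71 heads gives 64-dim heads, and without alibi the model
# falls back to rotary position embeddings:
config = FalconConfig()
assert config.head_dim == 4544 // 71 == 64
assert config.rotary is True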
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}

ZERO2 = "zero2"
ZERO3 = "zero3"
stages = [ZERO2, ZERO3]


def custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"


# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=False, fp16=False,
        )

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=True, fp16=False,
        )

    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=False, fp16=True,
        )

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=True, fp16=True,
        )

    def do_checks(self, output_dir):
        # XXX: run_asr is premature and doesn't save any results
        # so all we check for now is that the process didn't fail
        pass

    def run_and_check(
        self,
        stage: str,
        model: str,
        eval_steps: int = 10,
        distributed: bool = True,
        quality_checks: bool = True,
        fp16: bool = True,
    ):
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage,
            model_name=model_name,
            eval_steps=eval_steps,
            num_train_epochs=1,
            distributed=distributed,
            fp16=fp16,
        )
        self.do_checks(output_dir)
        return output_dir

    def run_trainer(
        self,
        stage: str,
        model_name: str,
        eval_steps: int = 10,
        num_train_epochs: int = 1,
        distributed: bool = True,
        fp16: bool = True,
    ):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = f"""
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
            --report_to none
        """.split()
        if fp16:
            args.extend(["--fp16"])
        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)
        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())
        return output_dir

    def get_launcher(self, distributed=False):
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
| 60 | 1 |
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1000000, n_limit: int = 10) -> int:
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)
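# Background for the counting above: a square lamina with outer side o and a
# centred square hole of side h (same parity as o, h <= o - 2) uses o**2 - h**2
# tiles. For each tile total t <= t_limit the dict counts how many (o, h) pairs
# produce it; the answer is how many totals arise in 1..n_limit distinct ways
# (Project Euler 174).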
if __name__ == "__main__":
print(F"{solution() = }")
| 127 |
'''simple docstring'''
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    # NOTE: the class name and attribute names were garbled in extraction; they are
    # restored here to the evident originals implied by the surrounding code.
    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPTaConfig(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPTaLMHeadModel(gpt_config)

    def forward(
        self,
        input_ids: torch.Tensor,
        prefix_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
    ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)

    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def generate_beam(
        self,
        input_ids=None,
        input_embeds=None,
        device=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
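# Illustrative usage sketch (kept as a comment: this module is not executable
# standalone because of its package-relative imports; the shapes and token id
# below are assumptions, not values from the original file):
#
#   decoder = UniDiffuserTextDecoder(prefix_length=77, prefix_inner_dim=768)
#   clip_features = torch.randn(2, 77, 768)
#   tokens, lengths = decoder.generate_captions(
#       clip_features, eos_token_id=50256, device="cpu"
#   )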
| 127 | 1 |
'''simple docstring'''
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")


if __name__ == "__main__":
    main()
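# Worked check for main()'s example: 5x^2 + 6x + 1 = 0 has discriminant
# 6*6 - 4*5*1 = 16, so the roots are (-6 + 4) / 10 = -0.2 and (-6 - 4) / 10 = -1.0.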
| 174 |

'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.linear_k': 'encoder.layers.*.self_attn.linear_k',
'self_attn.linear_v': 'encoder.layers.*.self_attn.linear_v',
'self_attn.linear_q': 'encoder.layers.*.self_attn.linear_q',
'self_attn.pos_bias_u': 'encoder.layers.*.self_attn.pos_bias_u',
'self_attn.pos_bias_v': 'encoder.layers.*.self_attn.pos_bias_v',
'self_attn.linear_out': 'encoder.layers.*.self_attn.linear_out',
'self_attn.linear_pos': 'encoder.layers.*.self_attn.linear_pos',
'self_attn.rotary_emb': 'encoder.embed_positions',
'self_attn_layer_norm': 'encoder.layers.*.self_attn_layer_norm',
'conv_module.pointwise_conv1': 'encoder.layers.*.conv_module.pointwise_conv1',
'conv_module.pointwise_conv2': 'encoder.layers.*.conv_module.pointwise_conv2',
'conv_module.depthwise_conv': 'encoder.layers.*.conv_module.depthwise_conv',
'conv_module.batch_norm': 'encoder.layers.*.conv_module.batch_norm',
'conv_module.layer_norm': 'encoder.layers.*.conv_module.layer_norm',
'ffn1.w_1': 'encoder.layers.*.ffn1.intermediate_dense',
'ffn1.w_2': 'encoder.layers.*.ffn1.output_dense',
'ffn1.layer_norm': 'encoder.layers.*.ffn1_layer_norm',
'ffn2.w_1': 'encoder.layers.*.ffn2.intermediate_dense',
'ffn2.w_2': 'encoder.layers.*.ffn2.output_dense',
'ffn2.layer_norm': 'encoder.layers.*.ffn2_layer_norm',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wavaveca_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavaveca_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = WavaVecaConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = WavaVecaConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wavavec = WavaVecaConformerForCTC(config)
    else:
        hf_wavavec = WavaVecaConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wavavec, not is_finetuned)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__snake_case : int = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
__snake_case : List[str] = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
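    # Example invocation (an illustrative sketch; the script file name and all
    # paths are placeholders, not values from the original source):
    #
    #   python convert_wav2vec2_conformer_checkpoint.py \
    #       --checkpoint_path /path/to/fairseq_checkpoint.pt \
    #       --pytorch_dump_folder_path /path/to/output \
    #       --dict_path /path/to/dict.ltr.txt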
| 174 | 1 |
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
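    # Example invocation (illustrative; the script name and dump path are
    # placeholders, not from the original file):
    #
    #   python convert_unclip_txt2img_to_image_variation.py --dump_path ./karlo-image-variations
    #
    # The resulting folder can then be loaded with
    # UnCLIPImageVariationPipeline.from_pretrained("./karlo-image-variations").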
| 119 |
'''simple docstring'''
def solution(power: int = 1000) -> int:
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0

    for i in list_num:
        sum_of_num += int(i)

    return sum_of_num


if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
    print("Sum of the digits is: ", result)
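# Worked example: solution(15) computes 2**15 = 32768, whose digit sum is
# 3 + 2 + 7 + 6 + 8 = 26.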
| 294 | 0 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
        "GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXForCausalLM",
        "GPTNeoXForQuestionAnswering",
        "GPTNeoXForSequenceClassification",
        "GPTNeoXForTokenClassification",
        "GPTNeoXLayer",
        "GPTNeoXModel",
        "GPTNeoXPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
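# Note (added for clarity): with this pattern `import transformers.models.gpt_neox`
# stays cheap; the torch-backed classes listed in `_import_structure` are only
# materialized on first attribute access through the `_LazyModule` proxy.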
| 711 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help=(
f'''Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '''
"download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--checkpoint_name",
default=None,
type=str,
help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
)
parser.add_argument(
"--force_download",
action="store_true",
help="Re-download checkpoints.",
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
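    # Example invocation (an illustrative sketch; values are placeholders):
    #
    #   python convert_slow_tokenizers_checkpoints_to_fast.py \
    #       --tokenizer_name BertTokenizer \
    #       --checkpoint_name bert-base-uncased \
    #       --dump_path ./fast-tokenizers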
| 241 | 0 |
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picked: int = 20) -> str:
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)

    result = NUM_COLOURS * (1 - missing_colour / total)

    return f"{result:.9f}"
if __name__ == "__main__":
print(solution(20))
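# Derivation (added for clarity): by linearity of expectation, the expected
# number of distinct colours among 20 drawn balls is
#   NUM_COLOURS * P(a given colour appears) = 7 * (1 - C(60, 20) / C(70, 20)),
# which is exactly what solution() evaluates.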
| 89 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
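# Illustrative example of the key renaming (added for clarity): for a PyTorch
# key like "down_blocks.0.attentions.1.proj.weight", rename_key() turns every
# "<name>.<digits>" pair into "<name>_<digits>", giving
# "down_blocks_0.attentions_1.proj.weight"; rename_key_and_reshape_tensor()
# then maps the trailing "weight" of a linear layer to Flax's "kernel" and
# transposes the matrix.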
| 280 | 0 |
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}

HIDDEN_SIZE_MAPPING = {
    "169M": 768,
    "430M": 1024,
    "1B5": 2048,
    "3B": 2560,
    "7B": 4096,
    "14B": 5120,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")

        if name != "head.weight":
            name = "rwkv." + name

        state_dict[name] = weight
    return state_dict
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint."""
)
parser.add_argument(
"""--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo."""
)
parser.add_argument(
"""--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model."""
)
parser.add_argument(
"""--tokenizer_file""",
default=None,
type=str,
help="""Path to the tokenizer file to use (if not provided, only the model is converted).""",
)
parser.add_argument(
"""--size""",
default=None,
type=str,
help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Push to the Hub the converted model.""",
)
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""Name of the pushed model on the Hub, including the username / organization.""",
)
    args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
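    # Example invocation (an illustrative sketch; the script, repo and file
    # names are placeholders, not values from the original source):
    #
    #   python convert_rwkv_checkpoint_to_hf.py \
    #       --repo_id BlinkDL/rwkv-4-pile-169m \
    #       --checkpoint_file <checkpoint>.pth \
    #       --output_dir ./rwkv-169m-hf --size 169M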
| 198 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    # NOTE: the class name, base class and attribute names were garbled in
    # extraction; they are restored to the evident originals.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
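# Illustrative usage sketch (the checkpoint id is an assumption, not taken from
# this file):
#
#   processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")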
| 198 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class AlignProcessorTest(unittest.TestCase):
    # NOTE: the class, method and variable names were garbled in extraction and
    # are restored here from the call sites that survived.
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
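# These tests are collected by the test runner rather than executed directly,
# e.g. (illustrative invocation): python -m pytest -q <path to this test file>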
| 12 |
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in str(i)) for i in range(100000)]


def next_number(number: int) -> int:
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000

    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.

# The other one ends with 1 and has only one element 1.

# So 58 and 1 are chosen to be declared at the starting.

# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10000000
CHAINS[0] = True  # the number 1 (index 0) ends its chain at 1
CHAINS[57] = False  # the number 58 (index 57) ends its chain at 89


def chain(number: int) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10000000) -> int:
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution() = }")
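# Worked example of the two chain endings (Project Euler 92): 44 -> 32 -> 13 ->
# 10 -> 1 (stuck at 1), while 85 -> 89 -> 145 -> 42 -> 20 -> 4 -> 16 -> 37 ->
# 58 -> 89 (stuck in the 89 loop); solution() counts the starting numbers below
# ten million whose chains end at 89.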
| 639 | 0 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False,
        metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
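# Example launch of this script (an illustrative sketch; the argument values
# are placeholders, not taken from the original file):
#
#   python run_xnli.py \
#       --model_name_or_path bert-base-multilingual-cased \
#       --language de --train_language en \
#       --do_train --do_eval \
#       --per_device_train_batch_size 32 \
#       --output_dir /tmp/debug_xnli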
def lowerCamelCase_ ( ):
lowerCamelCase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
lowerCamelCase_ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_xnli''' , _lowerCamelCase )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCamelCase_ = training_args.get_process_log_level()
logger.setLevel(_lowerCamelCase )
datasets.utils.logging.set_verbosity(_lowerCamelCase )
transformers.utils.logging.set_verbosity(_lowerCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
lowerCamelCase_ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase_ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
lowerCamelCase_ = load_dataset(
'''xnli''' , model_args.language , split='''train''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
lowerCamelCase_ = load_dataset(
'''xnli''' , model_args.train_language , split='''train''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase_ = train_dataset.features['''label'''].names
if training_args.do_eval:
lowerCamelCase_ = load_dataset(
'''xnli''' , model_args.language , split='''validation''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase_ = eval_dataset.features['''label'''].names
if training_args.do_predict:
lowerCamelCase_ = load_dataset(
'''xnli''' , model_args.language , split='''test''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase_ = predict_dataset.features['''label'''].names
# Labels
lowerCamelCase_ = len(_lowerCamelCase )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_lowerCamelCase , idalabel={str(_lowerCamelCase ): label for i, label in enumerate(_lowerCamelCase )} , labelaid={label: i for i, label in enumerate(_lowerCamelCase )} , finetuning_task='''xnli''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase_ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase_ = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
    padding = '''max_length'''
else:
    # We will pad later, dynamically at batch creation, to the max sequence length in each batch
    padding = False
def preprocess_function(examples ):
    # Tokenize the texts
    return tokenizer(
        examples['''premise'''] , examples['''hypothesis'''] , padding=padding , max_length=data_args.max_seq_length , truncation=True , )
if training_args.do_train:
    if data_args.max_train_samples is not None:
        max_train_samples = min(len(train_dataset ) , data_args.max_train_samples )
        train_dataset = train_dataset.select(range(max_train_samples ) )
    with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
        train_dataset = train_dataset.map(
            preprocess_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on train dataset''' , )
    # Log a few random samples from the training set:
    for index in random.sample(range(len(train_dataset ) ) , 3 ):
        logger.info(F"""Sample {index} of the training set: {train_dataset[index]}.""" )
if training_args.do_eval:
    if data_args.max_eval_samples is not None:
        max_eval_samples = min(len(eval_dataset ) , data_args.max_eval_samples )
        eval_dataset = eval_dataset.select(range(max_eval_samples ) )
    with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
        eval_dataset = eval_dataset.map(
            preprocess_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on validation dataset''' , )
if training_args.do_predict:
    if data_args.max_predict_samples is not None:
        max_predict_samples = min(len(predict_dataset ) , data_args.max_predict_samples )
        predict_dataset = predict_dataset.select(range(max_predict_samples ) )
    with training_args.main_process_first(desc='''prediction dataset map pre-processing''' ):
        predict_dataset = predict_dataset.map(
            preprocess_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on prediction dataset''' , )
# Get the metric function
metric = evaluate.load('''xnli''' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(p : EvalPrediction ):
    preds = p.predictions[0] if isinstance(p.predictions , tuple ) else p.predictions
    preds = np.argmax(preds , axis=1 )
    return metric.compute(predictions=preds , references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
    data_collator = default_data_collator
elif training_args.fp16:
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 )
else:
    data_collator = None
# Initialize our Trainer
trainer = Trainer(
    model=model , args=training_args , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=tokenizer , data_collator=data_collator , )
# Training
if training_args.do_train:
    checkpoint = None
    if training_args.resume_from_checkpoint is not None:
        checkpoint = training_args.resume_from_checkpoint
    elif last_checkpoint is not None:
        checkpoint = last_checkpoint
    train_result = trainer.train(resume_from_checkpoint=checkpoint )
    metrics = train_result.metrics
    max_train_samples = (
        data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
    )
    metrics['''train_samples'''] = min(max_train_samples , len(train_dataset ) )
    trainer.save_model() # Saves the tokenizer too for easy upload
    trainer.log_metrics('''train''' , metrics )
    trainer.save_metrics('''train''' , metrics )
    trainer.save_state()
# Evaluation
if training_args.do_eval:
    logger.info('''*** Evaluate ***''' )
    metrics = trainer.evaluate(eval_dataset=eval_dataset )
    max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
    metrics['''eval_samples'''] = min(max_eval_samples , len(eval_dataset ) )
    trainer.log_metrics('''eval''' , metrics )
    trainer.save_metrics('''eval''' , metrics )
# Prediction
if training_args.do_predict:
    logger.info('''*** Predict ***''' )
    predictions, labels, metrics = trainer.predict(predict_dataset , metric_key_prefix='''predict''' )
    max_predict_samples = (
        data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset )
    )
    metrics['''predict_samples'''] = min(max_predict_samples , len(predict_dataset ) )
    trainer.log_metrics('''predict''' , metrics )
    trainer.save_metrics('''predict''' , metrics )
    predictions = np.argmax(predictions , axis=1 )
    output_predict_file = os.path.join(training_args.output_dir , '''predictions.txt''' )
    if trainer.is_world_process_zero():
        with open(output_predict_file , '''w''' ) as writer:
            writer.write('''index\tprediction\n''' )
            for index, item in enumerate(predictions ):
                item = label_list[item]
                writer.write(F"""{index}\t{item}\n""" )
if __name__ == "__main__":
main() | 717 |
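# --- Illustrative launch command for the XNLI script above (not part of the original file). ---
# A hypothetical invocation, assuming the file is saved as run_xnli.py; the model name,
# languages and output directory below are placeholders, not values from the source:
#
#   python run_xnli.py \
#     --model_name_or_path bert-base-multilingual-cased \
#     --language de --train_language en \
#     --do_train --do_eval \
#     --max_seq_length 128 \
#     --output_dir /tmp/xnli_out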
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class lowerCAmelCase ( TensorFormatter[Mapping, "torch.Tensor", Mapping] ):
"""simple docstring"""
    def __init__( self , features=None , **torch_tensor_kwargs ) -> Dict:
        '''simple docstring'''
        super().__init__(features=features )
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch # noqa import torch at initialization
    def _consolidate( self , column ) -> Union[str, Any]:
        '''simple docstring'''
        import torch
        if isinstance(column , list ) and column:
            if all(
                isinstance(x , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column ):
                return torch.stack(column )
        return column
    def _tensorize( self , value ) -> Optional[int]:
        '''simple docstring'''
        import torch
        if isinstance(value , (str, bytes, type(None )) ):
            return value
        elif isinstance(value , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
            return value.tolist()
        default_dtype = {}
        if isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
            default_dtype = {'''dtype''': torch.int64}
        elif isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            default_dtype = {'''dtype''': torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image
            if isinstance(value , PIL.Image.Image ):
                value = np.asarray(value )
        return torch.tensor(value , **{**default_dtype, **self.torch_tensor_kwargs} )
    def _recursive_tensorize( self , data_struct ) -> List[Any]:
        '''simple docstring'''
        import torch
        # support for torch, tf, jax etc.
        if hasattr(data_struct , '''__array__''' ) and not isinstance(data_struct , torch.Tensor ):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct , np.ndarray ):
            if data_struct.dtype == object: # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        elif isinstance(data_struct , (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        return self._tensorize(data_struct )
    def recursive_tensorize( self , data_struct ) -> List[Any]:
        '''simple docstring'''
        return map_nested(self._recursive_tensorize , data_struct , map_list=False )
    def format_row( self , pa_table ) -> Mapping:
        '''simple docstring'''
        row = self.numpy_arrow_extractor().extract_row(pa_table )
        row = self.python_features_decoder.decode_row(row )
        return self.recursive_tensorize(row )
    def format_column( self , pa_table ) -> "torch.Tensor":
        '''simple docstring'''
        column = self.numpy_arrow_extractor().extract_column(pa_table )
        column = self.python_features_decoder.decode_column(column , pa_table.column_names[0] )
        column = self.recursive_tensorize(column )
        column = self._consolidate(column )
        return column
    def format_batch( self , pa_table ) -> Mapping:
        '''simple docstring'''
        batch = self.numpy_arrow_extractor().extract_batch(pa_table )
        batch = self.python_features_decoder.decode_batch(batch )
        batch = self.recursive_tensorize(batch )
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name] )
        return batch | 66 | 0 |
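# --- Illustrative usage of the torch formatter above (not part of the original file). ---
# A minimal sketch, assuming the `datasets` library is installed: the formatter is normally
# selected through `Dataset.with_format("torch")` rather than instantiated directly.
from datasets import Dataset

_demo = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]], "y": [0, 1]}).with_format("torch")
print(_demo[0])   # {"x": tensor([1., 2.]), "y": tensor(0)}
print(_demo[:2])  # equal-shape, equal-dtype columns are consolidated with torch.stack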
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : List[Any] = logging.get_logger(__name__)
lowerCAmelCase : Union[str, Any] = {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class UpperCamelCase__ ( PretrainedConfig ):
    """simple docstring"""
    model_type = "realm"
    def __init__( self , vocab_size=3_0522 , hidden_size=768 , retriever_proj_size=128 , num_hidden_layers=12 , num_attention_heads=12 , num_candidates=8 , intermediate_size=3072 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , span_hidden_size=256 , max_span_width=10 , reader_layer_norm_eps=1E-3 , reader_beam_size=5 , reader_seq_len=320 , num_block_records=1335_3718 , searcher_beam_size=5000 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len
        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
| 444 |
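# --- Illustrative usage of the REALM configuration above (not part of the original file). ---
# A minimal sketch, assuming the class corresponds to `transformers.RealmConfig`:
from transformers import RealmConfig

_cfg = RealmConfig(num_candidates=4)  # override one retrieval hyper-parameter
print(_cfg.hidden_size, _cfg.reader_beam_size)  # 768 and 5, the defaults set above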
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
MODEL = 'base_with_context'
def load_notes_encoder( weights , model ):
    """simple docstring"""
    __UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['token_embedder']['embedding'] ) )
    __UpperCamelCase = nn.Parameter(
        torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=False )
    for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[F"""layers_{lyr_num}"""]
        __UpperCamelCase = nn.Parameter(
            torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
        attention_weights = ly_weight['attention']
        __UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
        __UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
        __UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
        __UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
        __UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
        __UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
        __UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
        __UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
    __UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
    return model
def load_continuous_encoder( weights , model ):
    """simple docstring"""
    __UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['input_proj']['kernel'].T ) )
    __UpperCamelCase = nn.Parameter(
        torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=False )
    for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[F"""layers_{lyr_num}"""]
        attention_weights = ly_weight['attention']
        __UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
        __UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
        __UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
        __UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
        __UpperCamelCase = nn.Parameter(
            torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
        __UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
        __UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
        __UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
        __UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
    __UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
    return model
def load_decoder( weights , model ):
    """simple docstring"""
    __UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['time_emb_dense0']['kernel'].T ) )
    __UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['time_emb_dense1']['kernel'].T ) )
    __UpperCamelCase = nn.Parameter(
        torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=False )
    __UpperCamelCase = nn.Parameter(
        torch.FloatTensor(weights['continuous_inputs_projection']['kernel'].T ) )
    for lyr_num, lyr in enumerate(model.decoders ):
        ly_weight = weights[F"""layers_{lyr_num}"""]
        __UpperCamelCase = nn.Parameter(
            torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale'] ) )
        __UpperCamelCase = nn.Parameter(
            torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T ) )
        attention_weights = ly_weight['self_attention']
        __UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
        __UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
        __UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
        __UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
        attention_weights = ly_weight['MultiHeadDotProductAttention_0']
        __UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
        __UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
        __UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
        __UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
        __UpperCamelCase = nn.Parameter(
            torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale'] ) )
        __UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
        __UpperCamelCase = nn.Parameter(
            torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T ) )
        __UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
        __UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
        __UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
    __UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale'] ) )
    __UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T ) )
    return model
def main( args ):
    """simple docstring"""
    ta_checkpoint = checkpoints.load_tax_checkpoint(args.checkpoint_path )
    ta_checkpoint = jnp.tree_util.tree_map(onp.array , ta_checkpoint )
    gin_overrides = [
        'from __gin__ import dynamic_registration',
        'from music_spectrogram_diffusion.models.diffusion import diffusion_utils',
        'diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0',
        'diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()',
    ]
    gin_file = os.path.join(args.checkpoint_path , '..' , 'config.gin' )
    gin_config = inference.parse_training_gin_file(gin_file , gin_overrides )
    synth_model = inference.InferenceModel(args.checkpoint_path , gin_config )
    scheduler = DDPMScheduler(beta_schedule='squaredcos_cap_v2' , variance_type='fixed_large' )
    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length['inputs'] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['targets_context'] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
    decoder = TaFilmDecoder(
        input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['targets_context'] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
    notes_encoder = load_notes_encoder(ta_checkpoint['target']['token_encoder'] , notes_encoder )
    continuous_encoder = load_continuous_encoder(ta_checkpoint['target']['continuous_encoder'] , continuous_encoder )
    decoder = load_decoder(ta_checkpoint['target']['decoder'] , decoder )
    melgan = OnnxRuntimeModel.from_pretrained('kashif/soundstream_mel_decoder' )
    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder , continuous_encoder=continuous_encoder , decoder=decoder , scheduler=scheduler , melgan=melgan , )
    if args.save:
        pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--output_path', default=None, type=str, required=True, help='Path to the converted model.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument(
'--checkpoint_path',
default=F'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help='Path to the original jax model checkpoint.',
)
    args = parser.parse_args()
main(args)
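# --- Illustrative launch command for the converter above (not part of the original file). ---
# A hypothetical invocation, assuming the file is saved as convert_music_spectrogram_to_diffusers.py;
# the checkpoint and output paths are placeholders, not values from the source:
#
#   python convert_music_spectrogram_to_diffusers.py \
#     --checkpoint_path /path/to/base_with_context/checkpoint_500000 \
#     --output_path ./spectrogram_diffusion_pipeline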
| 383 | 0 |
'''simple docstring'''
import argparse
import datetime
def zeller(date_input : str ) -> str:
    """simple docstring"""
    days = {
        '0': 'Sunday',
        '1': 'Monday',
        '2': 'Tuesday',
        '3': 'Wednesday',
        '4': 'Thursday',
        '5': 'Friday',
        '6': 'Saturday',
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(date_input ) < 11:
        raise ValueError('Must be 10 characters long' )
    # Get month
    m = int(date_input[0] + date_input[1] )
    # Validate
    if not 0 < m < 13:
        raise ValueError('Month must be between 1 - 12' )
    sep_a = date_input[2]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError('Date separator must be \'-\' or \'/\'' )
    # Get day
    d = int(date_input[3] + date_input[4] )
    # Validate
    if not 0 < d < 32:
        raise ValueError('Date must be between 1 - 31' )
    # Get second separator
    sep_a = date_input[5]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError('Date separator must be \'-\' or \'/\'' )
    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            'Year out of range. There has to be some sort of limit...right?' )
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y ) , int(m ) , int(d ) )
    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y )[:2] )
    k = int(str(y )[2:] )
    t = int(2.6 * m - 5.39 )
    u = int(c / 4 )
    v = int(k / 4 )
    x = int(d + k )
    z = int(t + u + v + x )
    w = int(z - (2 * c) )
    f = round(w % 7 )
    # End math
    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError('The date was evaluated incorrectly. Contact developer.' )
    # Response
    response = F'''Your date {date_input}, is a {days[str(f )]}!'''
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case__ : Union[str, Any] = argparse.ArgumentParser(
description=(
'''Find out what day of the week nearly any date is or was. Enter '''
'''date as a string in the mm-dd-yyyy or mm/dd/yyyy format'''
)
)
parser.add_argument(
'''date_input''', type=str, help='''Date as a string (mm-dd-yyyy or mm/dd/yyyy)'''
)
snake_case__ : Optional[Any] = parser.parse_args()
zeller(args.date_input)
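# --- Worked example for zeller() above (not part of the original file). ---
# January 31st, 2010 fell on a Sunday, so:
#   zeller("01-31-2010") -> "Your date 01-31-2010, is a Sunday!"
# Malformed inputs raise instead, e.g. zeller("13-31-2010") -> ValueError (month 13).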
| 389 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ : Dict = logging.get_logger(__name__)
snake_case__ : int = {
'''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''',
}
class __SCREAMING_SNAKE_CASE ( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''mgp-str'''
    def __init__( self , image_size=[3_2, 1_2_8] , patch_size=4 , num_channels=3 , max_token_length=2_7 , num_character_labels=3_8 , num_bpe_labels=5_0_2_5_7 , num_wordpiece_labels=3_0_5_2_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , mlp_ratio=4.0 , qkv_bias=True , distilled=False , layer_norm_eps=1E-5 , drop_rate=0.0 , attn_drop_rate=0.0 , drop_path_rate=0.0 , output_aa_attentions=False , initializer_range=0.02 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_aa_attentions = output_aa_attentions
        self.initializer_range = initializer_range | 389 | 1 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing( search_prob , find_max : bool = True , max_x : float = math.inf , min_x : float = -math.inf , max_y : float = math.inf , min_y : float = -math.inf , visualization : bool = False , start_temperate : float = 1_00 , rate_of_decrease : float = 0.0_1 , threshold_temp : float = 1 , ) -> Any:
    '''simple docstring'''
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score )
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ): # till we do not find a neighbor that we can move to
            index = random.randint(0 , len(neighbors ) - 1 ) # picking a random neighbor
            picked_neighbor = neighbors.pop(index )
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue # neighbor outside our bounds
            if not find_max:
                change = change * -1 # in case we are finding minimum
            if change > 0: # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                ) # probability generation function
                if random.random() < probability: # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt
        plt.plot(range(iterations ) , scores )
        plt.xlabel("Iterations" )
        plt.ylabel("Function values" )
        plt.show()
    return best_state
if __name__ == "__main__":
    def test_fa( x , y ):
'''simple docstring'''
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
prob, find_max=False, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
)
print(
"The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
F'''and 50 > y > - 5 found via hill climbing: {local_min.score()}'''
)
# starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
prob, find_max=True, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
)
print(
"The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
F'''and 50 > y > - 5 found via hill climbing: {local_min.score()}'''
)
    def test_fa( x , y ):
'''simple docstring'''
return (3 * x**2) - (6 * y)
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
print(
"The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
F'''{local_min.score()}'''
)
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(prob, find_max=True, visualization=True)
print(
"The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
F'''{local_min.score()}'''
) | 241 |
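# --- Supplementary sketch (not part of the original file). ---
# The acceptance test above is the Metropolis criterion: a worse neighbor is taken with
# probability e^(change / current_temp), so large regressions are unlikely and all moves
# become effectively greedy as the temperature decays. A tiny standalone illustration
# (the helper name below is ours, not from the source):
import math
import random

def metropolis_accepts(change: float, temperature: float) -> bool:
    # an improving move (change > 0) is always accepted; a worsening one only sometimes
    return change > 0 or random.random() < math.e ** (change / temperature)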
from __future__ import annotations
def maximum_non_adjacent_sum(nums : list[int] ) -> int:
    '''simple docstring'''
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including , max_excluding = (
            max_excluding + num,
            max(max_including , max_excluding ),
        )
    return max(max_including , max_excluding )
if __name__ == "__main__":
import doctest
doctest.testmod() | 241 | 1 |
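# --- Worked examples for maximum_non_adjacent_sum() above (not part of the original file). ---
#   maximum_non_adjacent_sum([1, 2, 3]) == 4               # take 1 and 3
#   maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6]) == 18  # take 5, 7 and 6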
def fibonacci(n : int ):
    if n == 1 or not isinstance(n , int ):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2 , n + 1 ):
            sequence.append(sequence[i - 1] + sequence[i - 2] )
        return sequence[n]
def fibonacci_digits_index(n : int ):
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index ) ) )
    return index
def solution(n : int = 10_00 ):
    return fibonacci_digits_index(n )
if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
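# --- Worked example (not part of the original file). ---
# With the indexing above, fibonacci(12) == 144 is the first three-digit term, so
# fibonacci_digits_index(3) == 12; solution() asks for the first 1000-digit term,
# whose index is 4782 (Project Euler problem 25).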
| 679 | import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__magic_name__ = '''pt'''
elif is_tf_available():
__magic_name__ = '''tf'''
else:
__magic_name__ = '''jax'''
class __SCREAMING_SNAKE_CASE ( TokenizerTesterMixin , unittest.TestCase):
    """simple docstring"""
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False
    def setUp( self ):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def perceiver_tokenizer( self ):
        return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
    def get_tokenizer( self , **kwargs ):
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_clean_sequence( self , tokenizer , with_prefix_space=False , max_length=20 , min_length=5 ):
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer ) ):
            try:
                tok = tokenizer.decode([i] , clean_up_tokenization_spaces=False )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )
        toks = list(filter(lambda t : re.match(R'^[ a-zA-Z]+$' , t[1] ) , toks ) )
        toks = list(filter(lambda t : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=False ) , toks ) )
        if max_length is not None and len(toks ) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks ) < min_length and len(toks ) > 0:
            while len(toks ) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids , clean_up_tokenization_spaces=False )
        if " " not in output_txt and len(toks_ids ) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=False )
                + ' '
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=False )
            )
        if with_prefix_space:
            output_txt = ' ' + output_txt
        output_ids = tokenizer.encode(output_txt , add_special_tokens=False )
        return output_txt, output_ids
    def test_multibytes_char( self ):
        tokenizer = self.perceiver_tokenizer
        src_text = 'Unicode €.'
        encoded = tokenizer(src_text )
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded['input_ids'] , encoded_ids )
        # decoding
        decoded = tokenizer.decode(encoded_ids )
        self.assertEqual(decoded , '[CLS]Unicode €.[SEP]' )
        encoded = tokenizer('e è é ê ë' )
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded['input_ids'] , encoded_ids )
        # decoding
        decoded = tokenizer.decode(encoded_ids )
        self.assertEqual(decoded , '[CLS]e è é ê ë[SEP]' )
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' )
    def test_prepare_batch_integer( self ):
        tokenizer = self.perceiver_tokenizer
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text , padding=True , return_tensors=FRAMEWORK )
        self.assertIsInstance(batch , BatchEncoding )
        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0] )
        else:
            result = list(batch.input_ids.tolist()[0] )
        self.assertListEqual(expected_src_tokens , result )
        self.assertEqual((2, 38) , batch.input_ids.shape )
        self.assertEqual((2, 38) , batch.attention_mask.shape )
    def test_empty_target_text( self ):
        tokenizer = self.perceiver_tokenizer
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        batch = tokenizer(src_text , padding=True , return_tensors=FRAMEWORK )
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn('input_ids' , batch )
        self.assertIn('attention_mask' , batch )
        self.assertNotIn('decoder_input_ids' , batch )
        self.assertNotIn('decoder_attention_mask' , batch )
    def test_max_length( self ):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            'Summary of the text.',
            'Another summary.',
        ]
        targets = tokenizer(
            text_target=tgt_text , max_length=32 , padding='max_length' , truncation=True , return_tensors=FRAMEWORK )
        self.assertEqual(32 , targets['input_ids'].shape[1] )
    def test_save_and_load_tokenizer( self ):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
                self.assertNotEqual(tokenizer.model_max_length , 42 )
        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = ' He is very happy, UNwant\u00E9d,running'
                before_tokens = tokenizer.encode(sample_text , add_special_tokens=False )
                tokenizer.save_pretrained(tmpdirname )
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname )
                after_tokens = after_tokenizer.encode(sample_text , add_special_tokens=False )
                self.assertListEqual(before_tokens , after_tokens )
                shutil.rmtree(tmpdirname )
        tokenizers = self.get_tokenizers(model_max_length=42 )
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = ' He is very happy, UNwant\u00E9d,running'
                tokenizer.add_tokens(['bim', 'bambam'] )
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append('new_additional_special_token' )
                tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
                before_tokens = tokenizer.encode(sample_text , add_special_tokens=False )
                tokenizer.save_pretrained(tmpdirname )
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname )
                after_tokens = after_tokenizer.encode(sample_text , add_special_tokens=False )
                self.assertListEqual(before_tokens , after_tokens )
                self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length , 42 )
                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname , model_max_length=43 )
                self.assertEqual(tokenizer.model_max_length , 43 )
                shutil.rmtree(tmpdirname )
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens( self ):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir )
                with open(os.path.join(tmp_dir , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
                    special_tokens_map = json.load(json_file )
                with open(os.path.join(tmp_dir , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
                    tokenizer_config = json.load(json_file )
                added_tokens_extra_ids = [F"""<extra_id_{i}>""" for i in range(125 )]
                special_tokens_map['additional_special_tokens'] = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]
                tokenizer_config['additional_special_tokens'] = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]
                with open(os.path.join(tmp_dir , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
                    json.dump(special_tokens_map , outfile )
                with open(os.path.join(tmp_dir , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
                    json.dump(tokenizer_config , outfile )
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir , )
                self.assertIn(
                    'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
                self.assertEqual(
                    ['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=True )]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir , additional_special_tokens=new_added_tokens , )
                self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
                self.assertEqual(
                    ['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
    def test_decode_invalid_byte_id( self ):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178] ) , '�' )
    def test_pretrained_model_lists( self ):
        pass
    def test_get_vocab( self ):
        pass
    def test_pretokenized_inputs( self ):
        pass
    def test_conversion_reversible( self ):
        pass
    def test_convert_tokens_to_string_format( self ):
        # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
        # strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True , do_lower_case=True )
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
                tokens = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
                string = tokenizer.convert_tokens_to_string(tokens )
                self.assertIsInstance(string , str )
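# --- Illustrative usage of the tokenizer under test (not part of the original file). ---
# PerceiverTokenizer is byte-level: each UTF-8 byte becomes one id (offset past the
# special tokens), which is why the multi-byte '€' spans several ids in the tests above.
# A minimal sketch (fetches the released tokenizer config from the Hub):
#
#   from transformers import PerceiverTokenizer
#   tok = PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")
#   tok("Unicode €.").input_ids  # [CLS] + one id per byte + [SEP]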
| 679 | 1 |
"""simple docstring"""
from random import randint, random
def construct_highway( number_of_cells : int, frequency : int, initial_speed : int, random_frequency : bool = False, random_speed : bool = False, max_speed : int = 5, ) -> list:
    """simple docstring"""
    highway = [[-1] * number_of_cells] # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0 )
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed ) if random_speed else initial_speed
        ) # Place the cars
        i += (
            randint(1, max_speed * 2 ) if random_frequency else frequency
        ) # Arbitrary number, may need tuning
    return highway
def get_distance( highway_now : list, car_index : int ) -> int:
    """simple docstring"""
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells ) ): # May need a better name for this
        if cells[cell] != -1: # If the cell is not empty then
            return distance # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1 )
def update( highway_now : list, probability : float, max_speed : int ) -> list:
    """simple docstring"""
    number_of_cells = len(highway_now )
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells ):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed )
            # Number of empty cell before the next car
            dn = get_distance(highway_now, car_index ) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn )
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0 )
    return next_highway
def simulate( highway : list, number_of_update : int, probability : float, max_speed : int ) -> list:
    """simple docstring"""
    number_of_cells = len(highway[0] )
    for i in range(number_of_update ):
        next_speeds_calculated = update(highway[i], probability, max_speed )
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells ):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds )
    return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
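# --- Deterministic example for the Nagel-Schreckenberg model above (not part of the original file). ---
# With random braking disabled the cars only accelerate and keep their safety distance;
# each appended row is the highway after one update step:
#
#   history = simulate(construct_highway(21, 5, 2), number_of_update=3, probability=0.0, max_speed=5)
#   len(history) == 4  # the initial highway plus three updates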
| 104 | import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''moussaKam/mbarthez''': 10_24,
'''moussaKam/barthez''': 10_24,
'''moussaKam/barthez-orangesum-title''': 10_24,
}
SPIECE_UNDERLINE = '''▁'''
class a__ ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = BarthezTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , **kwargs , ):
        """simple docstring"""
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        """simple docstring"""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1=None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def save_vocabulary( self , save_directory , filename_prefix=None ):
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
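# --- Illustrative usage of the fast Barthez tokenizer above (not part of the original file). ---
# A minimal sketch (fetches the pretrained sentencepiece/tokenizer files from the Hub):
#
#   from transformers import BarthezTokenizerFast
#   tok = BarthezTokenizerFast.from_pretrained("moussaKam/barthez")
#   enc = tok("Un texte en français.")  # ids framed as <s> ... </s> via build_inputs_with_special_tokens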
| 216 | 0 |
"""simple docstring"""
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow(arr : list[float] ) -> list[float]:
    """simple docstring"""
    result = []
    arr_size = len(arr )
    for i in range(arr_size ):
        next_element : float = -1
        for j in range(i + 1 , arr_size ):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element )
    return result
def next_greatest_element_fast(arr : list[float] ) -> list[float]:
    """simple docstring"""
    result = []
    for i, outer in enumerate(arr ):
        next_element : float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_element = inner
                break
        result.append(next_element )
    return result
def next_greatest_element(arr : list[float] ) -> list[float]:
    """simple docstring"""
    arr_size = len(arr )
    stack : list[float] = []
    result : list[float] = [-1] * arr_size
    for index in reversed(range(arr_size ) ):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index] )
    return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
"""from __main__ import arr, next_greatest_element_slow, """
"""next_greatest_element_fast, next_greatest_element"""
)
print(
"""next_greatest_element_slow():""",
timeit("""next_greatest_element_slow(arr)""", setup=setup),
)
print(
"""next_greatest_element_fast():""",
timeit("""next_greatest_element_fast(arr)""", setup=setup),
)
print(
""" next_greatest_element():""",
timeit("""next_greatest_element(arr)""", setup=setup),
)
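# --- Note on the stack-based version above (not part of the original file). ---
# next_greatest_element() scans right-to-left and keeps a decreasing stack of candidates,
# so every element is pushed and popped at most once: O(n) overall, versus the O(n^2)
# double loops of the two reference implementations. For example:
#   next_greatest_element([2, 7, 3, 5, 4, 6, 8]) == [7, 8, 5, 6, 6, 8, -1]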
| 702 |
"""simple docstring"""
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class _lowerCAmelCase ( TestCase ):
    def setUp( self ) -> Any:
        '''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname , "dpr_tokenizer" )
        os.makedirs(dpr_tokenizer_path , exist_ok=True )
        self.vocab_file = os.path.join(dpr_tokenizer_path , DPR_VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        bart_tokenizer_path = os.path.join(self.tmpdirname , "bart_tokenizer" )
        os.makedirs(bart_tokenizer_path , exist_ok=True )
        self.vocab_file = os.path.join(bart_tokenizer_path , BART_VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(bart_tokenizer_path , BART_VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(merges ) )
    def get_dpr_tokenizer( self ) -> DPRQuestionEncoderTokenizer:
        '''simple docstring'''
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
    def get_dpr_ctx_encoder_tokenizer( self ) -> DPRContextEncoderTokenizer:
        '''simple docstring'''
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
    def get_bart_tokenizer( self ) -> BartTokenizer:
        '''simple docstring'''
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) )
    def tearDown( self ) -> Union[str, Any]:
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname )
    def get_dummy_dataset( self ) -> Dict:
        '''simple docstring'''
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
            } )
        dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
        return dataset
    def get_dummy_canonical_hf_index_retriever( self ) -> Union[str, Any]:
        '''simple docstring'''
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
        with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
        return retriever
    def get_dummy_custom_hf_index_retriever( self , from_disk ) -> Optional[Any]:
        '''simple docstring'''
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="custom" , )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname , "dataset" )
            config.index_path = os.path.join(self.tmpdirname , "index.faiss" )
            dataset.get_index("embeddings" ).save(os.path.join(self.tmpdirname , "index.faiss" ) )
            dataset.drop_index("embeddings" )
            dataset.save_to_disk(os.path.join(self.tmpdirname , "dataset" ) )
            del dataset
            retriever = RagRetriever(
                config , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
        else:
            retriever = RagRetriever(
                config , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , dataset ) , )
        return retriever
    def get_dummy_legacy_index_retriever( self ) -> Optional[Any]:
        '''simple docstring'''
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
            } )
        dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
        index_file_name = os.path.join(self.tmpdirname , "hf_bert_base.hnswSQ8_correct_phi_128.c_index" )
        dataset.save_faiss_index("embeddings" , index_file_name + ".index.dpr" )
        pickle.dump(dataset["id"] , open(index_file_name + ".index_meta.dpr" , "wb" ) )
        passages_file_name = os.path.join(self.tmpdirname , "psgs_w100.tsv.pkl" )
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages , open(passages_file_name , "wb" ) )
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="legacy" , index_path=self.tmpdirname , )
        retriever = RagRetriever(
            config , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
        return retriever
def test_canonical_hf_index_retriever_retrieve(self):
    n_docs = 1
    retriever = self.get_dummy_canonical_hf_index_retriever()
    hidden_states = np.array(
        [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
    )
    retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
    self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
    self.assertEqual(len(doc_dicts), 2)
    self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
    self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
    self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
    self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
    self.assertListEqual(doc_ids.tolist(), [[1], [0]])

def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
    retriever = self.get_dummy_canonical_hf_index_retriever()
    with tempfile.TemporaryDirectory() as tmp_dirname:
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = self.get_dummy_dataset()
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

def test_custom_hf_index_retriever_retrieve(self):
    n_docs = 1
    retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
    hidden_states = np.array(
        [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
    )
    retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
    self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
    self.assertEqual(len(doc_dicts), 2)
    self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
    self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
    self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
    self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
    self.assertListEqual(doc_ids.tolist(), [[1], [0]])

def test_custom_hf_index_retriever_save_and_from_pretrained(self):
    retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
    with tempfile.TemporaryDirectory() as tmp_dirname:
        retriever.save_pretrained(tmp_dirname)
        retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

def test_custom_hf_index_retriever_retrieve_from_disk(self):
    n_docs = 1
    retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
    hidden_states = np.array(
        [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
    )
    retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
    self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
    self.assertEqual(len(doc_dicts), 2)
    self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
    self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
    self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
    self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
    self.assertListEqual(doc_ids.tolist(), [[1], [0]])

def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
    retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
    with tempfile.TemporaryDirectory() as tmp_dirname:
        retriever.save_pretrained(tmp_dirname)
        retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

def test_legacy_index_retriever_retrieve(self):
    n_docs = 1
    retriever = self.get_dummy_legacy_index_retriever()
    hidden_states = np.array(
        [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
    )
    retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
    self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
    self.assertEqual(len(doc_dicts), 2)
    self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
    self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
    self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
    self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
    self.assertListEqual(doc_ids.tolist(), [[1], [0]])

def test_legacy_hf_index_retriever_save_and_from_pretrained(self):
    retriever = self.get_dummy_legacy_index_retriever()
    with tempfile.TemporaryDirectory() as tmp_dirname:
        retriever.save_pretrained(tmp_dirname)
        retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

@require_torch
@require_tokenizers
@require_sentencepiece
def test_hf_index_retriever_call(self):
    import torch

    n_docs = 1
    retriever = self.get_dummy_canonical_hf_index_retriever()
    question_input_ids = [[5, 7], [10, 11]]
    hidden_states = np.array(
        [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
    )
    out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
    context_input_ids, context_attention_mask, retrieved_doc_embeds = (
        out["context_input_ids"],
        out["context_attention_mask"],
        out["retrieved_doc_embeds"],
    )
    self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
    self.assertIsInstance(context_input_ids, list)
    self.assertIsInstance(context_attention_mask, list)
    self.assertIsInstance(retrieved_doc_embeds, np.ndarray)

    out = retriever(
        question_input_ids,
        hidden_states,
        prefix=retriever.config.generator.prefix,
        n_docs=n_docs,
        return_tensors="pt",
    )
    context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
        out["context_input_ids"],
        out["context_attention_mask"],
        out["retrieved_doc_embeds"],
        out["doc_ids"],
    )
    self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
    self.assertIsInstance(context_input_ids, torch.Tensor)
    self.assertIsInstance(context_attention_mask, torch.Tensor)
    self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)

@require_torch
@require_tokenizers
@require_sentencepiece
def test_custom_hf_index_end2end_retriever_call(self):
    context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
    n_docs = 1
    retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
    retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)

    question_input_ids = [[5, 7], [10, 11]]
    hidden_states = np.array(
        [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
    )
    out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)

    # check whether the retriever output consists of 6 attributes, including the tokenized docs
    self.assertEqual(len(out), 6)
    # check for doc-token-related keys in the output dictionary
    self.assertEqual(all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True)
| 117 | 0 |
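The doc ranking that the assertions above encode can be checked in isolation. Below is a minimal standalone NumPy sketch (not part of the test class; the vector size is an arbitrary assumption) of why an inner-product index returns doc "1" for the all-ones query and doc "0" for its negation.

import numpy as np

retrieval_vector_size = 8  # assumed size; the real tests read it from the fixture
doc_embeddings = np.stack([np.ones(retrieval_vector_size), 2 * np.ones(retrieval_vector_size)])
queries = np.stack([np.ones(retrieval_vector_size), -np.ones(retrieval_vector_size)])

# FAISS with METRIC_INNER_PRODUCT ranks documents by the dot product q @ d
scores = queries @ doc_embeddings.T  # [[8., 16.], [-8., -16.]]
print(scores.argmax(axis=1).tolist())  # [1, 0], matching doc_ids == [[1], [0]]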
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        # the highest-scoring class index maps to a text answer via the model config
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
| 30 |
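A hedged usage sketch for the tool above: it assumes a transformers installation with vision extras, downloads the ViLT checkpoint on first call, and uses a placeholder file name.

from PIL import Image

tool = ImageQuestionAnsweringTool()
image = Image.open("photo.png")  # "photo.png" is a hypothetical local file
print(tool(image, "What color is the car?"))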
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
    # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class Wav2Vec2Config(PretrainedConfig):
    model_type = "wav2vec2"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 30 | 1 |
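As a quick sanity check on the property above: the default `conv_stride` of (5, 2, 2, 2, 2, 2, 2) multiplies out to 320, so one output frame covers 320 input samples. A minimal sketch, assuming `transformers` is installed:

from transformers import Wav2Vec2Config

config = Wav2Vec2Config()
# 5 * 2**6 = 320 input samples per output frame
assert config.inputs_to_logits_ratio == 320
# at a 16 kHz sampling rate that is 320 / 16000 = 0.02 s, i.e. one logit every 20 ms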
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    """A graph vertex with an id, a key used for ordering, and weighted edges."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex id: distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight

def connect(graph, a, b, edge):
    # add the neighbors (vertices a and b are 1-indexed):
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)

def prim(graph: list, root: Vertex) -> list:
    """Prim's algorithm with a linear scan for the minimum-key vertex: O(V^2)."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a

def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Prim's algorithm with a binary heap: O((V + E) log V)."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)

def test_vector() -> None:
    """Doctest examples for prim/prim_heap were elided in this dump."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 717 |
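A small worked example for the functions above (a sketch using only the definitions in this file): build a weighted triangle and extract its minimum spanning tree.

graph = [Vertex(i) for i in range(3)]  # vertices with ids "0", "1", "2"
connect(graph, 1, 2, 1)  # edge 1-2 with weight 1
connect(graph, 2, 3, 2)  # edge 2-3 with weight 2
connect(graph, 1, 3, 3)  # edge 1-3 with weight 3 (excluded from the MST)
print(prim(graph, graph[0]))  # [(2, 1), (3, 2)]: each vertex paired with its MST parent
print(list(prim_heap(graph, graph[0])))  # same tree via the heap-based variant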
import base64


def base85_encode(string: str) -> bytes:
    # encode the input string as UTF-8 bytes, then Ascii85-encode those bytes
    return base64.a85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    # Ascii85-decode the input, then decode the resulting bytes as UTF-8
    return base64.a85decode(a85encoded).decode("utf-8")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 658 | 0 |
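A short round-trip sketch for the two helpers above; the literal is the Ascii85 encoding that the standard library produces for this input.

encoded = base85_encode("Hello World!")
print(encoded)  # b'87cURD]i,"Ebo80'
assert base85_decode(encoded) == "Hello World!"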