code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
"""simple docstring"""
import argparse
import hashlib
import io
import os
import urllib.request
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
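# Official OpenAI Whisper checkpoint URLs; the path segment before the file name
# is the SHA256 checksum that _download() below verifies after fetching.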
_MODELS = {
'tiny.en': 'https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt',
'tiny': 'https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt',
'base.en': 'https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt',
'base': 'https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt',
'small.en': 'https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt',
'small': 'https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt',
'medium.en': 'https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt',
'medium': 'https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt',
'large': 'https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt',
'large-v2': 'https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt',
}
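# The original checkpoint stores a couple of top-level entries that have no
# equivalent in the Transformers model; they are dropped before conversion.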
def remove_ignore_keys_(state_dict) -> None:
    ignore_keys = ['layers', 'blocks']
    for k in ignore_keys:
        state_dict.pop(k, None)
WHISPER_MAPPING = {
'blocks': 'layers',
'mlp.0': 'fc1',
'mlp.2': 'fc2',
'mlp_ln': 'final_layer_norm',
'.attn.query': '.self_attn.q_proj',
'.attn.key': '.self_attn.k_proj',
'.attn.value': '.self_attn.v_proj',
'.attn_ln': '.self_attn_layer_norm',
'.attn.out': '.self_attn.out_proj',
'.cross_attn.query': '.encoder_attn.q_proj',
'.cross_attn.key': '.encoder_attn.k_proj',
'.cross_attn.value': '.encoder_attn.v_proj',
'.cross_attn_ln': '.encoder_attn_layer_norm',
'.cross_attn.out': '.encoder_attn.out_proj',
'decoder.ln.': 'decoder.layer_norm.',
'encoder.ln.': 'encoder.layer_norm.',
'token_embedding': 'embed_tokens',
'encoder.positional_embedding': 'encoder.embed_positions.weight',
'decoder.positional_embedding': 'decoder.embed_positions.weight',
'ln_post': 'layer_norm',
}
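# rename_keys() below rewrites every state-dict key by applying each of the
# substring substitutions above (OpenAI name fragment -> Transformers fragment).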
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)
        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
return lin_layer
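# The returned linear layer shares its data tensor with the embedding weights,
# so assigning it to `proj_out` below ties the LM head to the decoder token embeddings.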
def _download(url: str, root: str = '.') -> bytes:
    # NOTE: the call site below passes only the URL, so `root` defaults to the
    # current directory here (an assumption made to keep the script runnable).
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split('/')[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        model_bytes = open(download_target, 'rb').read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, 'wb') as output:
        with tqdm(
            total=int(source.info().get('Content-Length')), ncols=80, unit='iB', unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))

    model_bytes = open(download_target, 'rb').read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            'Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.')
    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path) -> None:
    if ".pt" not in checkpoint_path:
        # A model size name such as "tiny" was passed: download the official checkpoint
        # first (wrapping in io.BytesIO is a minimal fix, since _download returns raw bytes).
        original_checkpoint = torch.load(io.BytesIO(_download(_MODELS[checkpoint_path])), map_location='cpu')
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location='cpu')
    dimensions = original_checkpoint['dims']
    state_dict = original_checkpoint['model_state_dict']
    proj_out_weights = state_dict['decoder.token_embedding.weight']
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict['decoder.layers.0.fc1.weight'].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions['n_vocab'],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions['n_mels'],
        d_model=dimensions['n_audio_state'],
        max_target_positions=dimensions['n_text_ctx'],
        encoder_layers=dimensions['n_audio_layer'],
        encoder_attention_heads=dimensions['n_audio_head'],
        decoder_layers=dimensions['n_text_layer'],
        decoder_attention_heads=dimensions['n_text_head'],  # was n_text_state, which is the hidden size, not the head count
        max_source_positions=dimensions['n_audio_ctx'],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        'encoder.embed_positions.weights',
        'decoder.embed_positions.weights',
    }:
        raise ValueError(
            'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'
            f' but all the following weights are missing {missing}')

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--checkpoint_path', type=str, help='Path to the downloaded checkpoints')
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
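    # Example invocation (the script file name is hypothetical):
    #   python convert_openai_whisper_to_hf.py --checkpoint_path tiny --pytorch_dump_folder_path ./whisper-tiny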
| 16 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""sayakpaul/vit-msn-base""": """https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json""",
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = 'vit_msn'

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 128 | 0 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
    from .pipeline_shap_e_img2img import ShapEImg2ImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 253 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self) -> None:
        self.checkpoint = 'ZinengTang/tvlt-base'
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self) -> None:
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)
        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        audio = np.ones([12000])
        audio_dict = feature_extractor(audio, return_tensors='np')
        input_processor = processor(audio=audio, return_tensors='np')
        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        images = np.ones([3, 224, 224])
        image_dict = image_processor(images, return_tensors='np')
        input_processor = processor(images=images, return_tensors='np')
        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        audio = np.ones([12000])
        images = np.ones([3, 224, 224])
        inputs = processor(audio=audio, images=images)
        self.assertListEqual(list(inputs.keys()), ['audio_values', 'audio_mask', 'pixel_values', 'pixel_mask'])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg='`processor` and `image_processor`+`feature_extractor` model input names do not match',
        )
| 253 | 1 |
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job) -> dict:
    '''Extract the start/end timestamps and duration (in minutes) of a single job.'''
    job_info = {}
    start = job['started_at']
    end = job['completed_at']
    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)
    job_info['started_at'] = start
    job_info['completed_at'] = end
    job_info['duration'] = duration_in_min
    return job_info
def get_job_time(workflow_run_id, token=None) -> dict:
    '''Extract time info for all jobs in a GitHub Actions workflow run.'''
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f'Bearer {token}'}
    url = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'
    result = requests.get(url, headers=headers).json()
    job_time = {}
    try:
        job_time.update({job['name']: extract_time_from_single_job(job) for job in result['jobs']})
        pages_to_iterate_over = math.ceil((result['total_count'] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f'&page={i + 2}', headers=headers).json()
            job_time.update({job['name']: extract_time_from_single_job(job) for job in result['jobs']})
        return job_time
    except Exception:
        print(f'Unknown error, could not fetch links:\n{traceback.format_exc()}')
    return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
    args = parser.parse_args()
    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]['duration'], reverse=True))
for k, v in job_time.items():
print(F'''{k}: {v['duration']}''')
| 296 |
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main() -> None:
    parser = ArgumentParser('Diffusers CLI tool', usage='diffusers-cli <command> [<args>]')
    commands_parser = parser.add_subparsers(help='diffusers-cli command helpers')

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()
    if not hasattr(args, 'func'):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
| 296 | 1 |
"""simple docstring"""
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )
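# Worked example: quadratic_roots(a=5, b=6, c=1) solves 5x^2 + 6x + 1 = 0; the
# discriminant is 36 - 20 = 16, so the roots are (-6 +/- 4) / 10 = -0.2 and -1.0.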
def main() -> None:
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f'The solutions are: {solution_1} and {solution_2}')
if __name__ == "__main__":
main()
| 351 |
"""simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester:
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ['hidden_size', 'num_attention_heads', 'num_hidden_layers']
            if self.common_properties is None
            else self.common_properties
        )
        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(['vocab_size'])
        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f'`{prop}` does not exist')
        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f'`{name}` value {idx} expected, but was {getattr(config, name)}')
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f'`{name}` value {idx} expected, but was {getattr(config, name)}')
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, 'config.json')
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = 'test'
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_dir = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_dir)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)
        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)

    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == 'torch_dtype':
                if not is_torch_available():
                    continue
                else:
                    import torch
                    if config.torch_dtype != torch.float16:
                        wrong_values.append(('torch_dtype', config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))
        if len(wrong_values) > 0:
            errors = '\n'.join([f'- {v[0]}: got {v[1]} instead of {v[2]}' for v in wrong_values])
            raise ValueError(f'The following keys were not properly set in the config:\n{errors}')

    def run_common_tests(self):
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
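# Typical usage from a model's config test (BertConfig is only an illustrative
# choice here):
#   ConfigTester(self, config_class=BertConfig, hidden_size=37).run_common_tests()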
| 54 | 0 |
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (('num_inference_steps', 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            'num_train_timesteps': 1000,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'solver_order': 2,
            'prediction_type': 'epsilon',
            'thresholding': False,
            'sample_max_value': 1.0,
            'algorithm_type': 'dpmsolver++',
            'solver_type': 'midpoint',
            'lambda_min_clipped': -float('inf'),
            'variance_type': None,
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample
                assert torch.sum(torch.abs(output - new_output)) < 1e-5, 'Scheduler outputs are not identical'

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, 'Scheduler outputs are not identical'

    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        return sample

    def test_full_uneven_loop(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2574) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ['midpoint', 'heun']:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ['epsilon', 'sample']:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type='dpmsolver++',
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ['epsilon', 'v_prediction']:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ['dpmsolver', 'dpmsolver++']:
            for solver_type in ['midpoint', 'heun']:
                for order in [1, 2, 3]:
                    for prediction_type in ['epsilon', 'sample']:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), 'Samples have nan numbers'

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_lambda_min_clipped(self):
        self.check_over_configs(lambda_min_clipped=-float('inf'))
        self.check_over_configs(lambda_min_clipped=-5.1)

    def test_variance_type(self):
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type='learned_range')

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_full_loop_with_karras(self):
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2248) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type='v_prediction')
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.1453) < 1e-3

    def test_full_loop_with_karras_and_v_prediction(self):
        sample = self.full_loop(prediction_type='v_prediction', use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.0649) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        assert sample.dtype == torch.float16
| 60 |
from __future__ import annotations
import numpy as np
def relu(vector):
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 26 | 0 |
import unittest
from transformers import DebertaV2Tokenizer, DebertaV2TokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class DebertaV2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaV2Tokenizer
    rust_tokenizer_class = DebertaV2TokenizerFast
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, unk_token='<unk>')
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = 'this is a test'
        output_text = 'this is a test'
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = '<pad>'
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<pad>')
        self.assertEqual(vocab_keys[1], '<unk>')
        self.assertEqual(vocab_keys[-1], '[PAD]')
        self.assertEqual(len(vocab_keys), 30001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)

    def test_do_lower_case(self):
        # fmt: off
        sequence = ' \tHeLLo!how \n Are yoU? '
        tokens_target = ['▁hello', '!', 'how', '▁are', '▁you', '?']
        # fmt: on
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    @unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.')
    def test_sentencepiece_tokenize_and_convert_tokens_to_string(self):
        pass

    @unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.')
    def test_sentencepiece_tokenize_and_decode(self):
        pass

    def test_split_by_punct(self):
        # fmt: off
        sequence = 'I was born in 92000, and this is falsé.'
        tokens_target = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
        # fmt: on
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct(self):
        # fmt: off
        sequence = 'I was born in 92000, and this is falsé.'
        tokens_target = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
        # fmt: on
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct_false(self):
        # fmt: off
        sequence = 'I was born in 92000, and this is falsé.'
        tokens_target = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
        # fmt: on
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct(self):
        # fmt: off
        sequence = 'I was born in 92000, and this is falsé.'
        tokens_target = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
        # fmt: on
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct_false(self):
        # fmt: off
        sequence = ' \tHeLLo!how \n Are yoU? '
        tokens_target = ['▁', '<unk>', 'e', '<unk>', 'o', '!', 'how', '▁', '<unk>', 're', '▁yo', '<unk>', '?']
        # fmt: on
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_rust_and_python_full_tokenizers(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = 'I was born in 92000, and this is falsé.'
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        sequence = 'This is a test'
        ids_target = [13, 1, 4398, 25, 21, 1289]
        tokens_target = ['▁', 'T', 'his', '▁is', '▁a', '▁test']
        back_tokens_target = ['▁', '<unk>', 'his', '▁is', '▁a', '▁test']

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, keep_accents=True)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

        # fmt: off
        sequence = 'I was born in 92000, and this is falsé.'
        ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
        tokens_target = ['▁', 'I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.', ]
        back_tokens_target = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
        # fmt: on
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

    def test_sequence_builders(self):
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB)
        text = tokenizer.encode('sequence builders')
        text_2 = tokenizer.encode('multi-sequence build')
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [tokenizer.sep_token_id],
            encoded_pair,
        )
@slow
    def test_tokenizer_integration(self):
        # fmt: off
__lowercase : Union[str, Any] = {"""input_ids""": [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
            expected_encoding=__lowercase, model_name='microsoft/deberta-v2-xlarge', revision='ad6e42c1532ddf3a15c39246b63f5559d558b670')
| 368 |
def is_pentagonal(n: int) -> bool:
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0
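# A number x is pentagonal iff n = (1 + sqrt(1 + 24x)) / 6 is a positive integer,
# which is the check above; solution() below searches for a pair of pentagonal
# numbers whose sum and difference are both pentagonal and returns that difference.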
def solution(limit: int = 5000) -> int:
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
if __name__ == "__main__":
    print(f'''{solution() = }''')
| 306 | 0 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if 'tiny' in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if 'small' in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if 'base' in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if 'large' in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if 'xlarge' in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = 'huggingface/label-files'
    filename = 'ade20k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=['stage1', 'stage2', 'stage3', 'stage4']
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )
    return config
def create_rename_keys(config):
    rename_keys = []
    # fmt: off
# stem
rename_keys.append(('''backbone.downsample_layers.0.0.weight''', '''backbone.embeddings.patch_embeddings.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.0.bias''', '''backbone.embeddings.patch_embeddings.bias''') )
rename_keys.append(('''backbone.downsample_layers.0.1.weight''', '''backbone.embeddings.layernorm.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.1.bias''', '''backbone.embeddings.layernorm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.stages.{i}.{j}.gamma""", f"""backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.depthwise_conv.weight""", f"""backbone.encoder.stages.{i}.layers.{j}.dwconv.weight""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.depthwise_conv.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.dwconv.bias""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.norm.weight""", f"""backbone.encoder.stages.{i}.layers.{j}.layernorm.weight""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.norm.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.layernorm.bias""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv1.weight""", f"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv1.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv2.weight""", f"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv2.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias""") )
if i > 0:
rename_keys.append((f"""backbone.downsample_layers.{i}.0.weight""", f"""backbone.encoder.stages.{i}.downsampling_layer.0.weight""") )
rename_keys.append((f"""backbone.downsample_layers.{i}.0.bias""", f"""backbone.encoder.stages.{i}.downsampling_layer.0.bias""") )
rename_keys.append((f"""backbone.downsample_layers.{i}.1.weight""", f"""backbone.encoder.stages.{i}.downsampling_layer.1.weight""") )
rename_keys.append((f"""backbone.downsample_layers.{i}.1.bias""", f"""backbone.encoder.stages.{i}.downsampling_layer.1.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
'''upernet-convnext-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth''',
'''upernet-convnext-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth''',
'''upernet-convnext-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth''',
'''upernet-convnext-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth''',
'''upernet-convnext-xlarge''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth''',
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')['state_dict']
    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if 'bn' in key:
            key = key.replace('bn', 'batch_norm')
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)

    # verify on image
    url = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
    image = Image.open(requests.get(url, stream=True).raw).convert('RGB')
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors='pt').pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
[[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]] )
elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
[[-8.8_2_3_6, -8.8_2_3_6, -8.6_7_7_1], [-8.8_2_3_6, -8.8_2_3_6, -8.6_7_7_1], [-8.7_6_3_8, -8.7_6_3_8, -8.6_2_4_0]] )
elif model_name == "upernet-convnext-base":
lowerCAmelCase_ : Dict = torch.tensor(
[[-8.8_5_5_8, -8.8_5_5_8, -8.6_9_0_5], [-8.8_5_5_8, -8.8_5_5_8, -8.6_9_0_5], [-8.7_6_6_9, -8.7_6_6_9, -8.6_0_2_1]] )
elif model_name == "upernet-convnext-large":
lowerCAmelCase_ : Optional[Any] = torch.tensor(
[[-8.6_6_6_0, -8.6_6_6_0, -8.6_2_1_0], [-8.6_6_6_0, -8.6_6_6_0, -8.6_2_1_0], [-8.6_3_1_0, -8.6_3_1_0, -8.5_9_6_4]] )
elif model_name == "upernet-convnext-xlarge":
lowerCAmelCase_ : Dict = torch.tensor(
[[-8.4_9_8_0, -8.4_9_8_0, -8.3_9_7_7], [-8.4_9_8_0, -8.4_9_8_0, -8.3_9_7_7], [-8.4_3_7_9, -8.4_3_7_9, -8.3_4_1_2]] )
print('''Logits:''' ,outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] ,__UpperCamelCase ,atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__UpperCamelCase )
print(f"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(__UpperCamelCase )
if push_to_hub:
print(f"""Pushing model and processor for {model_name} to hub""" )
model.push_to_hub(f"""openmmlab/{model_name}""" )
processor.push_to_hub(f"""openmmlab/{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-convnext-tiny''',
type=str,
choices=[F'''upernet-convnext-{size}''' for size in ['''tiny''', '''small''', '''base''', '''large''', '''xlarge''']],
help='''Name of the ConvNext UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
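    # Example invocation (editor's note; the script filename and output path are
    # hypothetical, the flags are exactly those defined above):
    #   python convert_upernet_convnext_to_pytorch.py --model_name upernet-convnext-tiny \
    #       --pytorch_dump_folder_path ./upernet-convnext-tiny --push_to_hub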
| 103 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model
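# Editor's sketch (not part of the original conversion script): `make_linear_from_emb`
# builds an lm_head that shares storage with the embedding matrix, so no extra
# parameters are allocated when `--finetuned` is set.
def _check_make_linear_from_emb() -> None:
    emb = nn.Embedding(10, 4)
    lm_head = make_linear_from_emb(emb)
    assert lm_head.bias is None  # created with bias=False
    assert lm_head.weight.data_ptr() == emb.weight.data_ptr()  # weights are shared, not copied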
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
    parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is an mBART-50 checkpoint''')
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path) | 74 | 0 |
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state
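# Editor's note: the acceptance rule above is the classic Metropolis criterion. A worse
# neighbor (change < 0 when maximizing) is accepted with probability exp(change / temp),
# so a score drop of 2.0 is accepted ~98% of the time at temperature 100 but only
# ~13.5% of the time at temperature 1. A small helper making that explicit:
def acceptance_probability(change: float, temp: float) -> float:
    return math.e ** (change / temp)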
if __name__ == "__main__":
def lowerCamelCase ( UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any ) -> int:
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
_lowercase : Union[str, Any] = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
_lowercase : Union[str, Any] = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
f"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
_lowercase : int = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
_lowercase : Optional[int] = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
f"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
def lowerCamelCase ( UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] ) -> int:
return (3 * x**2) - (6 * y)
_lowercase : str = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
_lowercase : Dict = simulated_annealing(prob, find_max=False, visualization=True)
print(
"The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
f"""{local_min.score()}"""
)
_lowercase : int = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
_lowercase : Any = simulated_annealing(prob, find_max=True, visualization=True)
print(
"The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
f"""{local_min.score()}"""
)
| 368 | '''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)
def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]
def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    # The tiny epsilon leaves the result numerically indistinguishable from
    # tf.nn.softmax while avoiding problems when compiling with XLA.
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)
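# Editor's worked example: for ordinary inputs the result matches tf.nn.softmax, e.g.
#   stable_softmax(tf.constant([[1.0, 2.0, 3.0]]))  ->  ~[[0.0900, 0.2447, 0.6652]]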
def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    # This is a very simplified functional layernorm, designed to duplicate
    # the functionality of PyTorch nn.functional.layer_norm when this is needed to port
    # models in Transformers.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs,
        mean,
        variance,
        offset=bias,
        scale=weight,
        variance_epsilon=epsilon,
    )
    return outputs
def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF
    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)
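# Editor's worked example mirroring torch.flatten semantics:
#   x = tf.zeros((2, 3, 4, 5))
#   flatten(x, start_dim=1, end_dim=2).shape  ->  (2, 12, 5)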
def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask
def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )
def save_attributes_to_hdf5_group(group, name, data):
    HDF5_OBJECT_HEADER_LIMIT = 64512

    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
def load_attributes_from_hdf5_group(group, name):
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data
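# Editor's sketch of a save/load round trip (assumes h5py, which this module does not
# import, so this is illustrative only):
#   with h5py.File("weights.h5", "w") as f:
#       group = f.create_group("layer")
#       save_attributes_to_hdf5_group(group, "weight_names", ["kernel:0", "bias:0"])
#       assert load_attributes_from_hdf5_group(group, "weight_names") == ["kernel:0", "bias:0"]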
def expand_1d(data):
    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
| 21 | 0 |
__version__ = '0.21.0'
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
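# Editor's usage sketch for `find_executable_batch_size`, re-exported above: the
# decorated function is retried with a reduced batch size whenever it raises a CUDA
# out-of-memory error (the exact reduction policy is an implementation detail of
# accelerate.utils).
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def training_loop(batch_size):
#       ...  # build dataloaders with `batch_size` and train
#
#   training_loop()  # called with no argument; the decorator supplies batch_size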
| 180 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_fnet'] = ['FNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_fnet_fast'] = ['FNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_fnet'] = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
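    # Editor's note: with `_LazyModule`, the submodules listed in `_import_structure`
    # are only imported on first attribute access, so e.g.
    #   from transformers.models.fnet import FNetConfig
    # triggers the import of `configuration_fnet` alone, keeping package import cheap.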
| 154 | 0 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir('fixtures')
class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2')

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('requests.Session.request', return_value=response_mock) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2')
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = Wav2Vec2FeatureExtractor.from_pretrained(
            'https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json'
        )
@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id='test-feature-extractor')
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id='valid_org/test-feature-extractor-org')
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id='test-dynamic-feature-extractor')
        except HTTPError:
            pass

    def test_push_to_hub(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub('test-feature-extractor', use_auth_token=self._token)
        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id='test-feature-extractor')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id='test-feature-extractor', push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_in_organization(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub('valid_org/test-feature-extractor', use_auth_token=self._token)
        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('valid_org/test-feature-extractor')
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id='valid_org/test-feature-extractor')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id='valid_org/test-feature-extractor-org', push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('valid_org/test-feature-extractor-org')
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub('test-dynamic-feature-extractor', use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map, {'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor'}
        )

        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, 'CustomFeatureExtractor')
| 120 |
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class CNNDMDataset(Dataset):
    """Abstracts the dataset used to train seq2seq models."""

    def __init__(self, path="", prefix="train"):
        """We initialize the class by listing all the documents to summarize.
        Files are not read in memory due to the size of some datasets (like CNN/DailyMail).
        """
        assert os.path.isdir(path)

        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path, story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)

    def __len__(self):
        """Returns the number of documents."""
        return len(self.documents)

    def __getitem__(self, idx):
        document_path = self.documents[idx]
        document_name = document_path.split("/")[-1]
        with open(document_path, encoding="utf-8") as source:
            raw_story = source.read()
            story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines
def process_story(raw_story):
    """Extract the story and summary lines from a raw story file."""
    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")]))

    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]

    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight"):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []

    # gather summary lines
    summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines))

    return story_lines, summary_lines


def _add_missing_period(line):
    END_TOKENS = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u201d", ")"]
    if line.startswith("@highlight"):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."
def fit_to_block_size(sequence, block_size, pad_token_id):
    """Adapt the sequence length to the block size: truncate if longer,
    right-pad with the padding token if shorter."""
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence


def build_mask(sequence, pad_token_id):
    """Builds the attention mask: positions holding the padding token get 0,
    all other positions get 1."""
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask


def encode_for_summarization(story_lines, summary_lines, tokenizer):
    """Encode the story and summary lines and flatten them into single
    token-id sequences."""
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids


def compute_token_type_ids(batch, separator_token_id):
    """Alternate 0s and 1s per sentence, switching segment at every separator token."""
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
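# Editor's sketch of how the helpers above compose (values follow directly from the
# implementations in this file):
#   seq = fit_to_block_size([5, 6, 7], block_size=5, pad_token_id=0)   # -> [5, 6, 7, 0, 0]
#   build_mask(torch.tensor(seq), 0)                                   # -> tensor([1, 1, 1, 0, 0])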
| 120 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1114112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer(PreTrainedTokenizer):
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)

    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens) -> str:
        return "".join(tokens)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        # CANINE has no vocabulary file to save.
        return ()
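# Editor's usage sketch (checkpoint name taken from the size map above; ids follow
# from the codepoint constants: CLS = 0xE000 = 57344, SEP = 0xE001 = 57345):
#   tokenizer = CanineTokenizer.from_pretrained("nielsr/canine-s")
#   tokenizer("hi").input_ids  ->  [57344, 104, 105, 57345]   # [CLS] 'h' 'i' [SEP]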
| 321 |
'''simple docstring'''
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
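# Editor's worked example (a 3-4-5 triangle of magnitudes):
#   electrical_impedance(3, 4, 0)  ->  {"impedance": 5.0}
#   electrical_impedance(0, 4, 5)  ->  {"resistance": 3.0}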
if __name__ == "__main__":
import doctest
doctest.testmod()
| 321 | 1 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 79 |
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
a : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
a : List[Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(lowerCAmelCase__ )
a : Union[str, Any] = -1
a : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCAmelCase__ )
a : Tuple = model.generate(lowerCAmelCase__ , max_new_tokens=10 , do_sample=lowerCAmelCase__ )
a : List[Any] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
a : List[str] = TextStreamer(lowerCAmelCase__ )
model.generate(lowerCAmelCase__ , max_new_tokens=10 , do_sample=lowerCAmelCase__ , streamer=lowerCAmelCase__ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
a : int = cs.out[:-1]
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
    def test_iterator_streamer_matches_non_streaming(self):
a : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
a : Dict = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(lowerCAmelCase__ )
a : List[str] = -1
a : Optional[int] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCAmelCase__ )
a : Union[str, Any] = model.generate(lowerCAmelCase__ , max_new_tokens=10 , do_sample=lowerCAmelCase__ )
a : Dict = tokenizer.decode(greedy_ids[0] )
a : str = TextIteratorStreamer(lowerCAmelCase__ )
a : str = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
a : Tuple = Thread(target=model.generate , kwargs=lowerCAmelCase__ )
thread.start()
a : Dict = ""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
    def test_text_streamer_skip_prompt(self):
a : Optional[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
a : List[str] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(lowerCAmelCase__ )
a : Optional[int] = -1
a : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCAmelCase__ )
a : List[str] = model.generate(lowerCAmelCase__ , max_new_tokens=10 , do_sample=lowerCAmelCase__ )
a : Optional[Any] = greedy_ids[:, input_ids.shape[1] :]
a : str = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
a : Tuple = TextStreamer(lowerCAmelCase__ , skip_prompt=lowerCAmelCase__ )
model.generate(lowerCAmelCase__ , max_new_tokens=10 , do_sample=lowerCAmelCase__ , streamer=lowerCAmelCase__ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
a : Optional[int] = cs.out[:-1]
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
    def test_text_streamer_decode_kwargs(self):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
a : List[str] = AutoTokenizer.from_pretrained("distilgpt2" )
a : Any = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(lowerCAmelCase__ )
a : Optional[int] = -1
a : Union[str, Any] = torch.ones((1, 5) , device=lowerCAmelCase__ ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
a : Any = TextStreamer(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
model.generate(lowerCAmelCase__ , max_new_tokens=1 , do_sample=lowerCAmelCase__ , streamer=lowerCAmelCase__ )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
a : Optional[int] = cs.out[:-1] # Remove the final "\n"
a : List[Any] = tokenizer(lowerCAmelCase__ , return_tensors="pt" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
    def test_iterator_streamer_timeout(self):
a : int = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
a : List[Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(lowerCAmelCase__ )
a : str = -1
a : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCAmelCase__ )
a : List[Any] = TextIteratorStreamer(lowerCAmelCase__ , timeout=0.001 )
a : List[Any] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
a : int = Thread(target=model.generate , kwargs=lowerCAmelCase__ )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(lowerCAmelCase__ ):
a : int = ""
for new_text in streamer:
streamer_text += new_text
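# Editor's usage sketch distilled from the tests above: run generate() on a worker
# thread and consume decoded text as it streams in.
#   streamer = TextIteratorStreamer(tokenizer)
#   Thread(target=model.generate, kwargs={"input_ids": input_ids, "streamer": streamer}).start()
#   text = "".join(streamer)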
| 79 | 1 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 128,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 50,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 10,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 10,
'exponential_decay_length_penalty': (5, 1.0_1),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-config-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-config")
        except HTTPError:
            pass
    def test_push_to_hub(self):
__UpperCamelCase : Dict = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub("test-config" , use_auth_token=self._token )
__UpperCamelCase : Union[str, Any] = BertConfig.from_pretrained(f'{USER}/test-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-config" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(a , repo_id="test-config" , push_to_hub=a , use_auth_token=self._token )
__UpperCamelCase : Any = BertConfig.from_pretrained(f'{USER}/test-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
    def test_push_to_hub_in_organization(self):
__UpperCamelCase : Dict = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub("valid_org/test-config-org" , use_auth_token=self._token )
__UpperCamelCase : Union[str, Any] = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-config-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
a , repo_id="valid_org/test-config-org" , push_to_hub=a , use_auth_token=self._token )
__UpperCamelCase : int = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
    def test_push_to_hub_dynamic_config(self):
CustomConfig.register_for_auto_class()
__UpperCamelCase : Any = CustomConfig(attribute=4_2 )
config.push_to_hub("test-dynamic-config" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"AutoConfig": "custom_configuration.CustomConfig"} )
__UpperCamelCase : List[str] = AutoConfig.from_pretrained(f'{USER}/test-dynamic-config' , trust_remote_code=a )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , "CustomConfig" )
self.assertEqual(new_config.attribute , 4_2 )
class ConfigTestUtils(unittest.TestCase):
'''simple docstring'''
    def test_config_from_string(self):
        c = GPT2Config()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")
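    # Editor's sketch of `update_from_string` semantics exercised above (values parsed
    # from the comma-separated string are coerced to each attribute's existing type):
    #   c = GPT2Config()
    #   c.update_from_string("n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index")
    #   assert c.n_embd == 10 and c.scale_attn_weights is False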
    def test_config_common_kwargs_is_complete(self):
__UpperCamelCase : Optional[Any] = PretrainedConfig()
__UpperCamelCase : Optional[Any] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to addin config_common_kwargs above.
self.assertListEqual(
a , ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"] )
__UpperCamelCase : List[Any] = [key for key, value in config_common_kwargs.items() if value == getattr(a , a )]
if len(a ) > 0:
raise ValueError(
"The following keys are set with the default values in"
" `test_configuration_common.config_common_kwargs` pick another value for them:"
f' {", ".join(a )}.' )
    def test_from_pretrained_subfolder(self):
with self.assertRaises(a ):
# config is in subfolder, the following should not work without specifying the subfolder
__UpperCamelCase : Dict = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" )
__UpperCamelCase : int = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" , subfolder="bert" )
self.assertIsNotNone(a )
    def test_cached_files_are_used_when_internet_is_down(self):
# A mock response for an HTTP head request to emulate server down
__UpperCamelCase : Dict = mock.Mock()
__UpperCamelCase : Optional[Any] = 5_0_0
__UpperCamelCase : Dict = {}
__UpperCamelCase : List[str] = HTTPError
__UpperCamelCase : Tuple = {}
# Download this model to make sure it's in the cache.
__UpperCamelCase : List[Any] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=a ) as mock_head:
__UpperCamelCase : int = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# This check we did call the fake head request
mock_head.assert_called()
    def test_legacy_load_from_url(self):
# This test is for deprecated behavior and can be removed in v5
__UpperCamelCase : str = BertConfig.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" )
    def test_local_versioning(self):
__UpperCamelCase : Any = AutoConfig.from_pretrained("bert-base-cased" )
__UpperCamelCase : int = ["config.4.0.0.json"]
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(a )
__UpperCamelCase : Optional[int] = 2
json.dump(configuration.to_dict() , open(os.path.join(a , "config.4.0.0.json" ) , "w" ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
__UpperCamelCase : List[str] = AutoConfig.from_pretrained(a )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
__UpperCamelCase : List[Any] = ["config.42.0.0.json"]
__UpperCamelCase : Any = 7_6_8
configuration.save_pretrained(a )
shutil.move(os.path.join(a , "config.4.0.0.json" ) , os.path.join(a , "config.42.0.0.json" ) )
__UpperCamelCase : List[Any] = AutoConfig.from_pretrained(a )
self.assertEqual(new_configuration.hidden_size , 7_6_8 )
    def test_repo_versioning_before(self):
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
__UpperCamelCase : List[str] = "hf-internal-testing/test-two-configs"
import transformers as new_transformers
__UpperCamelCase : int = "v4.0.0"
__UpperCamelCase , __UpperCamelCase : Any = new_transformers.models.auto.AutoConfig.from_pretrained(
a , return_unused_kwargs=a )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` ia not kept in the kwargs by mistake.
self.assertDictEqual(a , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
__UpperCamelCase : Optional[Any] = "v3.0.0"
__UpperCamelCase : Optional[Any] = old_transformers.models.auto.AutoConfig.from_pretrained(a )
self.assertEqual(old_configuration.hidden_size , 7_6_8 ) | 232 |
from __future__ import annotations
import math
__version__ = "2020.9.26"
__author__ = "xcodz-dot, cclaus, dhruvmanila"
def convert_to_2d(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f"Input values must either be float or int: {list(locals().values())}"
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y
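# Editor's worked example: with x=1, y=2, z=3, scale=10, distance=10 the projection is
#   projected_x = (1 * 10) / (3 + 10) * 10 -> ~7.6923
#   projected_y = (2 * 10) / (3 + 10) * 10 -> ~15.3846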
def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            f"{list(input_variables.values())}"
        )
        raise TypeError(msg)
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
    print(f"{convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) = }")
    print(f"{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }") | 232 | 1 |
"""simple docstring"""
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints
from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)
def get_flax_param(t5x_checkpoint_path):
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params
def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}
    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }

    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict
def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        processor.image_processor.max_patches = 4096
        processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : List[Any] = argparse.ArgumentParser()
parser.add_argument("""--t5x_checkpoint_path""", default=None, type=str, help="""Path to the original T5x checkpoint.""")
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--use_large""", action="""store_true""", help="""Use large model.""")
parser.add_argument("""--is_vqa""", action="""store_true""", help="""Use large model.""")
_SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
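    # Example invocation (editor's note; the checkpoint and output paths are
    # hypothetical, the flags are exactly those defined above):
    #   python convert_pix2struct_original_pytorch_to_hf.py \
    #       --t5x_checkpoint_path /path/to/t5x/checkpoint \
    #       --pytorch_dump_folder_path ./pix2struct-base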
| 157 |
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 157 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class UpperCamelCase__ ( snake_case__ , snake_case__ , unittest.TestCase):
UpperCAmelCase__ : str = IFPipeline
UpperCAmelCase__ : List[Any] = TEXT_TO_IMAGE_PARAMS - {"""width""", """height""", """latents"""}
UpperCAmelCase__ : Any = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCAmelCase__ : Dict = PipelineTesterMixin.required_optional_params - {"""latents"""}
    def get_dummy_components( self ) -> List[Any]:
'''simple docstring'''
return self._get_dummy_components()
    def get_dummy_inputs( self , device , seed=0 ) -> Optional[Any]:
        '''simple docstring'''
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
    def test_save_load_optional_components( self ) -> Optional[Any]:
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
    def test_save_load_floataa( self ) -> Optional[int]:
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1E-1 )
    def test_attention_slicing_forward_pass( self ) -> str:
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
    def test_save_load_local( self ) -> Union[str, Any]:
'''simple docstring'''
self._test_save_load_local()
    def test_inference_batch_single_identical( self ) -> List[str]:
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def test_xformers_attention_forwardGenerator_pass( self ) -> Dict:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase):
    def tearDown( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self :Dict ) -> Optional[int]:
'''simple docstring'''
__A = IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0' , variant='fp16' , torch_dtype=torch.floataa )
__A = IFSuperResolutionPipeline.from_pretrained(
'DeepFloyd/IF-II-L-v1.0' , variant='fp16' , torch_dtype=torch.floataa , text_encoder=_A , tokenizer=_A )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('cuda' )
__A , __A = pipe_a.encode_prompt('anime turtle' , device='cuda' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
__A = None
__A = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(_A , _A , _A , _A )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
__A = IFImgaImgPipeline(**pipe_a.components )
__A = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(_A , _A , _A , _A )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
__A = IFInpaintingPipeline(**pipe_a.components )
__A = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(_A , _A , _A , _A )
def lowercase_ ( self :Optional[Any] , _A :Dict , _A :int , _A :Optional[Any] , _A :Dict ) -> Dict:
'''simple docstring'''
_start_torch_memory_measurement()
__A = torch.Generator(device='cpu' ).manual_seed(0 )
__A = pipe_a(
prompt_embeds=_A , negative_prompt_embeds=_A , num_inference_steps=2 , generator=_A , output_type='np' , )
__A = output.images[0]
assert image.shape == (64, 64, 3)
__A = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
__A = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy' )
assert_mean_pixel_difference(_A , _A )
# pipeline 2
_start_torch_memory_measurement()
__A = torch.Generator(device='cpu' ).manual_seed(0 )
__A = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_A )
__A = pipe_a(
prompt_embeds=_A , negative_prompt_embeds=_A , image=_A , generator=_A , num_inference_steps=2 , output_type='np' , )
__A = output.images[0]
assert image.shape == (256, 256, 3)
__A = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
__A = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy' )
assert_mean_pixel_difference(_A , _A )
def lowercase_ ( self :Union[str, Any] , _A :Tuple , _A :Tuple , _A :str , _A :Optional[int] ) -> List[str]:
'''simple docstring'''
_start_torch_memory_measurement()
__A = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_A )
__A = torch.Generator(device='cpu' ).manual_seed(0 )
__A = pipe_a(
prompt_embeds=_A , negative_prompt_embeds=_A , image=_A , num_inference_steps=2 , generator=_A , output_type='np' , )
__A = output.images[0]
assert image.shape == (64, 64, 3)
__A = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
__A = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy' )
assert_mean_pixel_difference(_A , _A )
# pipeline 2
_start_torch_memory_measurement()
__A = torch.Generator(device='cpu' ).manual_seed(0 )
__A = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(_A )
__A = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_A )
__A = pipe_a(
prompt_embeds=_A , negative_prompt_embeds=_A , image=_A , original_image=_A , generator=_A , num_inference_steps=2 , output_type='np' , )
__A = output.images[0]
assert image.shape == (256, 256, 3)
__A = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
__A = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy' )
assert_mean_pixel_difference(_A , _A )
def lowercase_ ( self :Tuple , _A :Dict , _A :Optional[Any] , _A :Optional[int] , _A :Dict ) -> str:
'''simple docstring'''
_start_torch_memory_measurement()
__A = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_A )
__A = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(_A )
__A = torch.Generator(device='cpu' ).manual_seed(0 )
__A = pipe_a(
prompt_embeds=_A , negative_prompt_embeds=_A , image=_A , mask_image=_A , num_inference_steps=2 , generator=_A , output_type='np' , )
__A = output.images[0]
assert image.shape == (64, 64, 3)
__A = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
__A = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy' )
assert_mean_pixel_difference(_A , _A )
# pipeline 2
_start_torch_memory_measurement()
__A = torch.Generator(device='cpu' ).manual_seed(0 )
__A = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_A )
__A = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(_A )
__A = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(_A )
__A = pipe_a(
prompt_embeds=_A , negative_prompt_embeds=_A , image=_A , mask_image=_A , original_image=_A , generator=_A , num_inference_steps=2 , output_type='np' , )
__A = output.images[0]
assert image.shape == (256, 256, 3)
__A = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
__A = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy' )
assert_mean_pixel_difference(_A , _A )
def _start_torch_memory_measurement( )-> Union[str, Any]:
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 161 |
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase__ = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class a__ ( snake_case__ , unittest.TestCase ):
_a : Optional[Any] = DebertaVaTokenizer
_a : Optional[Any] = DebertaVaTokenizerFast
_a : List[str] = True
_a : Optional[Any] = True
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
__lowerCAmelCase = DebertaVaTokenizer(_A , unk_token="<unk>" )
tokenizer.save_pretrained(self.tmpdirname )
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
__lowerCAmelCase = "this is a test"
__lowerCAmelCase = "this is a test"
return input_text, output_text
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = "<pad>"
__lowerCAmelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<pad>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "[PAD]" )
self.assertEqual(len(_A ) , 3_0_0_0_1 )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 3_0_0_0_0 )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = " \tHeLLo!how \n Are yoU? "
__lowerCAmelCase = ["▁hello", "!", "how", "▁are", "▁you", "?"]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
__lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
pass
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = "I was born in 92000, and this is falsé."
__lowerCAmelCase = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(_A , split_by_punct=_A )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
__lowerCAmelCase = DebertaVaTokenizerFast(_A , split_by_punct=_A )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = "I was born in 92000, and this is falsé."
__lowerCAmelCase = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
__lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = "I was born in 92000, and this is falsé."
__lowerCAmelCase = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
__lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = "I was born in 92000, and this is falsé."
__lowerCAmelCase = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
__lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = " \tHeLLo!how \n Are yoU? "
__lowerCAmelCase = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
__lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = "I was born in 92000, and this is falsé."
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
__lowerCAmelCase = tokenizer.encode(_A , add_special_tokens=_A )
__lowerCAmelCase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = tokenizer.encode(_A )
__lowerCAmelCase = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = "This is a test"
__lowerCAmelCase = [1_3, 1, 4_3_9_8, 2_5, 2_1, 1_2_8_9]
__lowerCAmelCase = ["▁", "T", "his", "▁is", "▁a", "▁test"]
__lowerCAmelCase = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]
__lowerCAmelCase = DebertaVaTokenizer(_A , keep_accents=_A )
__lowerCAmelCase = DebertaVaTokenizerFast(_A , keep_accents=_A )
__lowerCAmelCase = tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
# fmt: off
__lowerCAmelCase = "I was born in 92000, and this is falsé."
__lowerCAmelCase = [1_3, 1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9]
__lowerCAmelCase = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
__lowerCAmelCase = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
__lowerCAmelCase = tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = DebertaVaTokenizer(_A )
__lowerCAmelCase = tokenizer.encode("sequence builders" )
__lowerCAmelCase = tokenizer.encode("multi-sequence build" )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(_A )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(_A , _A )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , _A )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , _A , )
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = {"input_ids": [[1, 3_9_8_6_7, 3_6, 1_9_3_9_0, 4_8_6, 2_7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 6_0_6_8_5, 1_2_2_5, 7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 9_3_6_7, 1_6_8_9_9, 1_8, 1_5_9_3_7, 5_3, 5_9_4, 7_7_3, 1_8, 1_6_2_8_7, 3_0_4_6_5, 3_6, 1_5_9_3_7, 6, 4_1_1_3_9, 3_8, 3_6_9_7_9, 6_0_7_6_3, 1_9_1, 6, 3_4_1_3_2, 9_9, 6, 5_0_5_3_8, 3_9_0, 4_3_2_3_0, 6, 3_4_1_3_2, 2_7_7_9, 2_0_8_5_0, 1_4, 6_9_9, 1_0_7_2, 1_1_9_4, 3_6, 3_8_2, 1_0_9_0_1, 5_3, 7, 6_9_9, 1_0_7_2, 2_0_8_4, 3_6, 2_0_4_2_2, 6_3_0, 5_3, 1_9, 1_0_5, 3_0_4_9, 1_8_9_6, 1_0_5_3, 1_6_8_9_9, 1_5_0_6, 1_1, 3_7_9_7_8, 4_2_4_3, 7, 1_2_3_7, 3_1_8_6_9, 2_0_0, 1_6_5_6_6, 6_5_4, 6, 3_5_0_5_2, 8_1_4_3_6, 7, 5_5_6_3_0, 1_3_5_9_3, 4, 2], [1, 2_6, 1_5_0_1_1, 1_3, 6_6_7, 8, 1_0_5_3, 1_8, 2_3_6_1_1, 1_2_3_7, 7_2_3_5_6, 1_2_8_2_0, 3_4, 1_0_4_1_3_4, 1_2_0_9, 3_5, 1_3_3_1_3, 6_6_2_7, 2_1, 2_0_2, 3_4_7, 7, 1_6_4, 2_3_9_9, 1_1, 4_6, 4_4_8_5, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_2_3_2, 2_8_6_4, 1_5_7_8_5, 1_4_9_5_1, 1_0_5, 5, 8_5_8_1, 1_2_5_0, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name="microsoft/deberta-v2-xlarge" , revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" , )
| 92 | 0 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class _snake_case (Pipeline):
def __init__( self ,**_snake_case ):
super().__init__(**_snake_case )
requires_backends(self ,"vision" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
    def __call__( self ,image ,**kwargs ):
        return super().__call__(image ,**kwargs )
    def _sanitize_parameters( self ,**kwargs ):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}
def UpperCamelCase__ ( self ,_snake_case ,_snake_case=None ,_snake_case="This is a photo of {}." ):
UpperCAmelCase_ : Union[str, Any] = load_image(_snake_case )
UpperCAmelCase_ : str = self.image_processor(images=[image] ,return_tensors=self.framework )
UpperCAmelCase_ : Any = candidate_labels
UpperCAmelCase_ : Optional[int] = [hypothesis_template.format(_snake_case ) for x in candidate_labels]
UpperCAmelCase_ : Union[str, Any] = self.tokenizer(_snake_case ,return_tensors=self.framework ,padding=_snake_case )
UpperCAmelCase_ : List[Any] = [text_inputs]
return inputs
    def _forward( self ,model_inputs ):
        candidate_labels = model_inputs.pop("candidate_labels" )
        text_inputs = model_inputs.pop("text_inputs" )
        if isinstance(text_inputs[0] ,UserDict ):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs ,**model_inputs )
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs
    def postprocess( self ,model_outputs ):
        candidate_labels = model_outputs.pop("candidate_labels" )
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1 ).squeeze(-1 )
            scores = probs.tolist()
            if not isinstance(scores ,list ):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits ,axis=-1 )
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f'''Unsupported framework: {self.framework}''' )
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores ,candidate_labels ) ,key=lambda x : -x[0] )
        ]
return result
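# Minimal usage sketch (illustrative; assumes a CLIP-style checkpoint and the
# standard `transformers.pipeline` factory):
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   classifier("cat.png", candidate_labels=["cat", "dog"])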
| 67 |
'''simple docstring'''
from collections.abc import Sequence
def evaluate_poly ( poly : Sequence[float] , x : float ) -> float:
    """simple docstring"""
    return sum(c * (x**i) for i, c in enumerate(poly ) )
def horner ( poly : Sequence[float] , x : float ) -> float:
    """simple docstring"""
    result = 0.0
    for coeff in reversed(poly ):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
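    # Sanity check (illustrative): direct power-series evaluation and Horner's
    # rule compute the same value up to floating-point rounding.
    assert abs(evaluate_poly(poly, x) - horner(poly, x)) < 1e-6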
| 67 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_speech_encoder_decoder'] = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_speech_encoder_decoder'] = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 253 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ['text', 'image', 'audio']
def create_inputs ( input_types ):
    """simple docstring"""
    inputs = []
for input_type in input_types:
if input_type == "text":
inputs.append('Text input' )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png' ).resize((5_1_2, 5_1_2) ) )
elif input_type == "audio":
inputs.append(torch.ones(3_0_0_0 ) )
        elif isinstance(input_type , list ):
            inputs.append(create_inputs(input_type ) )
else:
raise ValueError(f"Invalid type requested: {input_type}" )
return inputs
def output_types ( outputs ):
    """simple docstring"""
    output_types = []
for output in outputs:
if isinstance(a , (str, AgentText) ):
output_types.append('text' )
elif isinstance(a , (Image.Image, AgentImage) ):
output_types.append('image' )
elif isinstance(a , (torch.Tensor, AgentAudio) ):
output_types.append('audio' )
else:
raise ValueError(f"Invalid output: {output}" )
return output_types
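# Round-trip example (illustrative, assumes torch and PIL are available):
# output_types(create_inputs(["text", "audio"])) == ["text", "audio"],
# which is exactly the property the mixin below asserts against real tools.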
@is_tool_test
class _A :
def UpperCAmelCase ( self ):
"""simple docstring"""
self.assertTrue(hasattr(self.tool , 'inputs' ) )
self.assertTrue(hasattr(self.tool , 'outputs' ) )
        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input , list ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
        outputs = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def UpperCAmelCase ( self ):
"""simple docstring"""
        inputs = create_inputs(self.tool.inputs )
        outputs = self.tool(*inputs )
        # There is a single output
        if len(self.tool.outputs ) == 1:
            outputs = [outputs]
        self.assertListEqual(output_types(outputs ) , self.tool.outputs )
def UpperCAmelCase ( self ):
"""simple docstring"""
self.assertTrue(hasattr(self.tool , 'description' ) )
self.assertTrue(hasattr(self.tool , 'default_checkpoint' ) )
self.assertTrue(self.tool.description.startswith('This is a tool that' ) )
def UpperCAmelCase ( self ):
"""simple docstring"""
        inputs = create_inputs(self.tool.inputs )
        outputs = self.tool(*inputs )
        if not isinstance(outputs , list ):
            outputs = [outputs]
        self.assertEqual(len(outputs ) , len(self.tool.outputs ) )
        for output, output_type in zip(outputs , self.tool.outputs ):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output , agent_type ) )
def UpperCAmelCase ( self ):
"""simple docstring"""
        inputs = create_inputs(self.tool.inputs )
        _inputs = []
        for _input, input_type in zip(inputs , self.tool.inputs ):
            if isinstance(input_type , list ):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
        # Should not raise an error
        outputs = self.tool(*_inputs )
        if not isinstance(outputs , list ):
            outputs = [outputs]
        self.assertEqual(len(outputs ) , len(self.tool.outputs ) )
| 253 | 1 |
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester :
def __init__( self , A , A=13 , A=32 , A=3 , A=4 , A=[10, 20, 30, 40] , A=[2, 2, 3, 2] , A=True , A=True , A=37 , A="gelu" , A=10 , A=0.0_2 , A=["stage2", "stage3", "stage4"] , A=[2, 3, 4] , A=None , ) -> str:
'''simple docstring'''
a = parent
a = batch_size
a = image_size
a = num_channels
a = num_stages
a = hidden_sizes
a = depths
a = is_training
a = use_labels
a = intermediate_size
a = hidden_act
a = num_labels
a = initializer_range
a = out_features
a = out_indices
a = scope
def lowerCAmelCase_ ( self ) -> Optional[Any]:
'''simple docstring'''
a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a = None
if self.use_labels:
a = ids_tensor([self.batch_size] , self.num_labels )
a = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase_ ( self ) -> List[Any]:
'''simple docstring'''
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=A , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def lowerCAmelCase_ ( self , A , A , A ) -> Optional[int]:
'''simple docstring'''
a = ConvNextVaModel(config=A )
model.to(A )
model.eval()
a = model(A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCAmelCase_ ( self , A , A , A ) -> Dict:
'''simple docstring'''
a = ConvNextVaForImageClassification(A )
model.to(A )
model.eval()
a = model(A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self , A , A , A ) -> Dict:
'''simple docstring'''
a = ConvNextVaBackbone(config=A )
model.to(A )
model.eval()
a = model(A )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
a = None
a = ConvNextVaBackbone(config=A )
model.to(A )
model.eval()
a = model(A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowerCAmelCase_ ( self ) -> Dict:
'''simple docstring'''
a = self.prepare_config_and_inputs()
a , a , a = config_and_inputs
a = {"pixel_values": pixel_values}
return config, inputs_dict
def lowerCAmelCase_ ( self ) -> Dict:
'''simple docstring'''
a = self.prepare_config_and_inputs()
a , a , a = config_and_inputs
a = {"pixel_values": pixel_values, "labels": labels}
return config, inputs_dict
@require_torch
class a__ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
a : List[Any] = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
a : Union[str, Any] = (
{"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
a : str = False
a : Any = False
a : Optional[int] = False
a : str = False
a : Tuple = False
    def setUp( self ) -> None:
'''simple docstring'''
        self.model_tester = ConvNextVaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ConvNextVaConfig , has_text_modality=False , hidden_size=37 )
def lowerCAmelCase_ ( self ) -> int:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase_ ( self ) -> Optional[Any]:
'''simple docstring'''
return
@unittest.skip(reason="ConvNextV2 does not use inputs_embeds" )
def lowerCAmelCase_ ( self ) -> str:
'''simple docstring'''
pass
@unittest.skip(reason="ConvNextV2 does not support input and output embeddings" )
def lowerCAmelCase_ ( self ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip(reason="ConvNextV2 does not use feedforward chunking" )
def lowerCAmelCase_ ( self ) -> List[str]:
'''simple docstring'''
pass
def lowerCAmelCase_ ( self ) -> Any:
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
a , a = self.model_tester.prepare_config_and_inputs_with_labels()
a = True
if model_class.__name__ in [
*get_values(A ),
*get_values(A ),
]:
continue
a = model_class(A )
model.to(A )
model.train()
a = self._prepare_for_class(A , A , return_labels=A )
a = model(**A ).loss
loss.backward()
def lowerCAmelCase_ ( self ) -> int:
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
a , a = self.model_tester.prepare_config_and_inputs_with_labels()
a = False
a = True
if (
model_class.__name__
in [*get_values(A ), *get_values(A )]
or not model_class.supports_gradient_checkpointing
):
continue
a = model_class(A )
model.to(A )
model.gradient_checkpointing_enable()
model.train()
a = self._prepare_for_class(A , A , return_labels=A )
a = model(**A ).loss
loss.backward()
def lowerCAmelCase_ ( self ) -> int:
'''simple docstring'''
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a = model_class(A )
a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a = [*signature.parameters.keys()]
a = ["pixel_values"]
self.assertListEqual(arg_names[:1] , A )
def lowerCAmelCase_ ( self ) -> List[Any]:
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def lowerCAmelCase_ ( self ) -> int:
'''simple docstring'''
def check_hidden_states_output(A , A , A ):
a = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
a = model(**self._prepare_for_class(A , A ) )
a = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
a = self.model_tester.num_stages
self.assertEqual(len(A ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a = True
check_hidden_states_output(A , A , A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a = True
check_hidden_states_output(A , A , A )
def lowerCAmelCase_ ( self ) -> List[Any]:
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
@slow
def lowerCAmelCase_ ( self ) -> Dict:
'''simple docstring'''
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a = ConvNextVaModel.from_pretrained(A )
self.assertIsNotNone(A )
def prepare_img( ) -> Union[str, Any]:
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class a__ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase_ ( self ) -> Any:
'''simple docstring'''
return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None
@slow
def lowerCAmelCase_ ( self ) -> int:
'''simple docstring'''
        model = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(torch_device )
        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.9_9_9_6, 0.1_9_6_6, -0.4_3_8_6] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 180 |
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
lowercase__ : str = TypeVar("T")
class GraphAdjacencyList ( Generic[T] ):
    def __init__( self , directed = True ) -> None:
        '''simple docstring'''
        self.adj_list = {} # dictionary of lists
        self.directed = directed
    def add_edge( self , source_vertex , destination_vertex ) -> GraphAdjacencyList[T]:
'''simple docstring'''
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex].append(source_vertex )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex] = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex )
                self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as it's first adjacent vertex also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as it's first adjacent vertex.
else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex] = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as it's first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
return self
def __repr__( self ) -> str:
'''simple docstring'''
return pformat(self.adj_list )
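if __name__ == "__main__":
    # Minimal usage sketch of the class above: an undirected triangle graph.
    # add_edge returns self, so calls can be chained.
    graph = GraphAdjacencyList[int]()
    graph.add_edge(0, 1).add_edge(1, 2).add_edge(0, 2)
    print(graph)  # {0: [1, 2], 1: [0, 2], 2: [1, 0]}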
| 180 | 1 |
"""simple docstring"""
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase ( lowercase_ , unittest.TestCase ):
lowercase = FunnelTokenizer
lowercase = FunnelTokenizerFast
lowercase = True
lowercase = True
    def setUp( self ):
'''simple docstring'''
super().setUp()
        vocab_tokens = [
'<unk>',
'<cls>',
'<sep>',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        self.vocab_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
    def get_tokenizer( self ,**kwargs ) -> Any:
        '''simple docstring'''
        return FunnelTokenizer.from_pretrained(self.tmpdirname ,**kwargs )
    def get_rust_tokenizer( self ,**kwargs ) -> List[str]:
        '''simple docstring'''
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname ,**kwargs )
    def get_input_output_texts( self ,tokenizer ):
'''simple docstring'''
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
return input_text, output_text
    def test_full_tokenizer( self ) -> Dict:
'''simple docstring'''
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('UNwant\u00E9d,running' )
        self.assertListEqual(tokens ,['un', '##want', '##ed', ',', 'runn', '##ing'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) ,[7, 4, 5, 10, 8, 9] )
    def test_token_type_ids( self ) -> Tuple:
'''simple docstring'''
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            inputs = tokenizer('UNwant\u00E9d,running' )
            sentence_len = len(inputs['input_ids'] ) - 1
            self.assertListEqual(inputs['token_type_ids'] ,[2] + [0] * sentence_len )
            inputs = tokenizer('UNwant\u00E9d,running' ,'UNwant\u00E9d,running' )
            self.assertListEqual(inputs['token_type_ids'] ,[2] + [0] * sentence_len + [1] * sentence_len )
| 213 |
"""simple docstring"""
def solution(limit = 100_0000 ):
    '''simple docstring'''
    primes = set(range(3 , limit , 2 ) )
    primes.add(2 )
    for p in range(3 , limit , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , limit , p ) ) )
    phi = [float(n ) for n in range(limit + 1 )]
    for p in primes:
        for n in range(p , limit + 1 , p ):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:] ) )
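# Hand-checked example (illustrative): for limit = 8 the totients phi(2..8)
# are 1, 2, 2, 4, 2, 6, 4, which sum to 21.
assert solution(8) == 21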
if __name__ == "__main__":
print(F"{solution() = }")
| 54 | 0 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : Any , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Tuple ):
warnings.warn(
"The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use MobileViTImageProcessor instead." , UpperCAmelCase , )
        super().__init__(*UpperCAmelCase , **UpperCAmelCase )
| 329 |
from maths.prime_factors import prime_factors
def __snake_case ( number : int ):
    """simple docstring"""
    if not isinstance(number , int ):
        msg = f'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg )
    if number < 1:
        raise ValueError("Input must be a positive integer" )
    return -1 if len(prime_factors(number ) ) % 2 else 1
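# Illustrative examples (assuming maths.prime_factors counts multiplicity):
# 12 = 2 * 2 * 3 has three prime factors, 4 = 2 * 2 has two.
assert __snake_case(12) == -1
assert __snake_case(4) == 1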
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 329 | 1 |
'''simple docstring'''
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'facebook/blenderbot_small-90M': 5_12}
def get_pairs ( word ):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    pairs = set(pairs )
    return pairs
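# Worked example (illustrative): get_pairs(("h", "e", "y</w>")) returns
# {("h", "e"), ("e", "y</w>")}, i.e. the adjacent-symbol bigrams that the
# BPE merge ranking below operates on.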
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
def __init__( self : List[Any] , __a : List[Any] , __a : List[Any] , __a : Optional[int]="__start__" , __a : Union[str, Any]="__end__" , __a : Any="__unk__" , __a : Union[str, Any]="__null__" , **__a : Tuple , ):
super().__init__(unk_token=__a , bos_token=__a , eos_token=__a , pad_token=__a , **__a )
with open(__a , encoding="utf-8" ) as vocab_handle:
_a = json.load(__a )
_a = {v: k for k, v in self.encoder.items()}
with open(__a , encoding="utf-8" ) as merges_handle:
_a = merges_handle.read().split("\n" )[1:-1]
_a = [tuple(merge.split() ) for merge in merges]
_a = dict(zip(__a , range(len(__a ) ) ) )
_a = {}
@property
def UpperCamelCase__ ( self : Any ):
return len(self.encoder )
def UpperCamelCase__ ( self : Dict ):
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCamelCase__ ( self : Tuple , __a : str ):
if token in self.cache:
return self.cache[token]
_a = re.sub("([.,!?()])" , r" \1" , __a )
_a = re.sub("(')" , r" \1 " , __a )
_a = re.sub(r"\s{2,}" , " " , __a )
if "\n" in token:
_a = token.replace("\n" , " __newln__" )
_a = token.split(" " )
_a = []
for token in tokens:
if not len(__a ):
continue
_a = token.lower()
_a = tuple(__a )
_a = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
_a = get_pairs(__a )
if not pairs:
words.append(__a )
continue
while True:
_a = min(__a , key=lambda __a : self.bpe_ranks.get(__a , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
_a , _a = bigram
_a = []
_a = 0
while i < len(__a ):
try:
_a = word.index(__a , __a )
new_word.extend(word[i:j] )
_a = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(__a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_a = tuple(__a )
_a = new_word
if len(__a ) == 1:
break
else:
_a = get_pairs(__a )
_a = "@@ ".join(__a )
_a = word[:-4]
_a = word
words.append(__a )
return " ".join(__a )
def UpperCamelCase__ ( self : Union[str, Any] , __a : str ):
_a = []
_a = re.findall(r"\S+\n?" , __a )
for token in words:
split_tokens.extend(list(self.bpe(__a ).split(" " ) ) )
return split_tokens
def UpperCamelCase__ ( self : Optional[int] , __a : str ):
_a = token.lower()
return self.encoder.get(__a , self.encoder.get(self.unk_token ) )
def UpperCamelCase__ ( self : Optional[int] , __a : int ):
return self.decoder.get(__a , self.unk_token )
def UpperCamelCase__ ( self : Tuple , __a : List[str] ):
_a = " ".join(__a ).replace("@@ " , "" ).strip()
return out_string
def UpperCamelCase__ ( self : Optional[int] , __a : str , __a : Optional[str] = None ):
if not os.path.isdir(__a ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_a = os.path.join(
__a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
_a = os.path.join(
__a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__a , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__a , ensure_ascii=__a ) + "\n" )
_a = 0
with open(__a , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
" Please check that the tokenizer is not corrupted!" )
_a = token_index
writer.write(" ".join(__a ) + "\n" )
index += 1
return vocab_file, merge_file
| 63 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/nllb-large-en-ro''': 1_024,
'''facebook/nllb-200-distilled-600M''': 1_024,
}
# fmt: off
UpperCamelCase = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class __UpperCAmelCase (_UpperCAmelCase ):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__( self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , src_lang=None , tgt_lang=None , additional_special_tokens=None , legacy_behaviour=False , **kwargs , ):
        '''simple docstring'''
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , src_lang=src_lang , tgt_lang=tgt_lang , additional_special_tokens=additional_special_tokens , legacy_behaviour=legacy_behaviour , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )
        self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code ) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else """eng_Latn"""
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang )
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
    @property
    def src_lang(self) -> str:
        '''simple docstring'''
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        '''simple docstring'''
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        '''simple docstring'''
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
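    # Resulting sequence in the default (non-legacy) NLLB format: [src_lang_code] tokens [eos];
    # with legacy_behaviour=True the language code follows eos instead: tokens [eos] [src_lang_code]
    # (see set_src_lang_special_tokens / set_tgt_lang_special_tokens below).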
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        '''simple docstring'''
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "eng_Latn", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "fra_Latn", **kwargs):
        '''simple docstring'''
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
    def _switch_to_input_mode(self):
        '''simple docstring'''
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        '''simple docstring'''
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        '''simple docstring'''
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )
    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        '''simple docstring'''
        self.cur_lang_code = self.convert_tokens_to_ids(tgt_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory.')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 306 | 0 |
import qiskit
def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    '''simple docstring'''
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)
    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))
    # Now measuring any one qubit would affect other qubits to collapse
    # their superposition and have the same state as the measured one.
    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1_000)
    return job.result().get_counts(circuit)
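# Expected behaviour (counts vary per run): for the default 2-qubit GHZ/Bell state the
# 1_000 shots split between the all-zeros and all-ones strings, e.g. {'00': 497, '11': 503}.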
if __name__ == "__main__":
print(F'''Total count for various states are: {quantum_entanglement(3)}''') | 351 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_distilbert': [
'DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'DistilBertConfig',
'DistilBertOnnxConfig',
],
'tokenization_distilbert': ['DistilBertTokenizer'],
}
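# Note: with this lazy-import pattern, importing the package stays cheap - the heavy
# torch/TF/Flax modules declared below are only loaded when one of their names is first accessed.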
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
'DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DistilBertForMaskedLM',
'DistilBertForMultipleChoice',
'DistilBertForQuestionAnswering',
'DistilBertForSequenceClassification',
'DistilBertForTokenClassification',
'DistilBertModel',
'DistilBertPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
'TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDistilBertForMaskedLM',
'TFDistilBertForMultipleChoice',
'TFDistilBertForQuestionAnswering',
'TFDistilBertForSequenceClassification',
'TFDistilBertForTokenClassification',
'TFDistilBertMainLayer',
'TFDistilBertModel',
'TFDistilBertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
'FlaxDistilBertForMaskedLM',
'FlaxDistilBertForMultipleChoice',
'FlaxDistilBertForQuestionAnswering',
'FlaxDistilBertForSequenceClassification',
'FlaxDistilBertForTokenClassification',
'FlaxDistilBertModel',
'FlaxDistilBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 284 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class Graph:
    """simple docstring"""

    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")
def test_vector() -> None:
    '''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 |
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
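# Note: each iteration swaps two uniformly random positions, so the output order is
# random by design, e.g. [1, 2, 3, 4] might come back as [3, 1, 4, 2].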
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
print("Fisher-Yates Shuffle:")
print("List", integers, strings)
print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 21 | 0 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(self, parent, batch_size=13, patch_size=2, max_length=24, num_mel_bins=16, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None, frequency_stride=2, time_stride=2):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride

        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2
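        # With these defaults: frequency_out = (16 - 2) // 2 + 1 = 8 and
        # time_out = (24 - 2) // 2 + 1 = 12, so seq_length = 8 * 12 + 2 = 98.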
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, input_values, labels
    def get_config(self):
        '''simple docstring'''
        return ASTConfig(
            patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
    def create_and_check_model(self, config, input_values, labels):
        '''simple docstring'''
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        '''simple docstring'''
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False

    def setUp(self):
        '''simple docstring'''
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        '''simple docstring'''
        pass

    def test_model_common_attributes(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_audio():
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" )
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        '''simple docstring'''
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )
@slow
    def test_inference_audio_classification(self):
        '''simple docstring'''
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)

        feature_extractor = self.default_feature_extractor
        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 5_27))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4)) | 163 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_x_clip": [
"XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XCLIPConfig",
"XCLIPTextConfig",
"XCLIPVisionConfig",
],
"processing_x_clip": ["XCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
"XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"XCLIPModel",
"XCLIPPreTrainedModel",
"XCLIPTextModel",
"XCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 163 | 1 |
'''simple docstring'''
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    '''simple docstring'''
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f'\t- key {key} has length {length}' for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)
def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    '''simple docstring'''
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group
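# For example: _distribute_shards(num_shards=5, max_num_jobs=2) -> [range(0, 3), range(3, 5)]
# (the first groups each get one extra shard until the remainder is used up).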
def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    '''simple docstring'''
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]
def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    '''simple docstring'''
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }
def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    '''simple docstring'''
    # Lists of the same size are shuffled with the same permutation, so entangled
    # data sources (e.g. shards and their metadata) stay aligned.
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
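# Illustrative behaviour: {"files": [f0, f1], "sizes": [s0, s1]} may come back as
# {"files": [f1, f0], "sizes": [s1, s0]} - equal-length lists share one permutation.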
| 120 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
__A : str = get_tests_dir("fixtures")
class __snake_case ( unittest.TestCase):
"""simple docstring"""
def __lowercase ( self : int ) -> List[str]:
# A mock response for an HTTP head request to emulate server down
lowerCAmelCase_ : str = mock.Mock()
lowerCAmelCase_ : Optional[Any] = 5_00
lowerCAmelCase_ : List[str] = {}
lowerCAmelCase_ : str = HTTPError
lowerCAmelCase_ : Any = {}
# Download this model to make sure it's in the cache.
lowerCAmelCase_ : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained("""hf-internal-testing/tiny-random-wav2vec2""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" , return_value=lowerCamelCase ) as mock_head:
lowerCAmelCase_ : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained("""hf-internal-testing/tiny-random-wav2vec2""" )
# This check we did call the fake head request
mock_head.assert_called()
def __lowercase ( self : Dict ) -> Any:
# This test is for deprecated behavior and can be removed in v5
lowerCAmelCase_ : List[str] = WavaVecaFeatureExtractor.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json""" )
@is_staging_test
class __snake_case ( unittest.TestCase):
"""simple docstring"""
@classmethod
def __lowercase ( cls : Tuple ) -> str:
lowerCAmelCase_ : Dict = TOKEN
HfFolder.save_token(lowerCamelCase )
@classmethod
def __lowercase ( cls : Any ) -> Any:
try:
delete_repo(token=cls._token , repo_id="""test-feature-extractor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-feature-extractor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-feature-extractor""" )
except HTTPError:
pass
def __lowercase ( self : int ) -> str:
lowerCAmelCase_ : Tuple = WavaVecaFeatureExtractor.from_pretrained(lowerCamelCase )
feature_extractor.push_to_hub("""test-feature-extractor""" , use_auth_token=self._token )
lowerCAmelCase_ : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained(F'{USER}/test-feature-extractor' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowerCamelCase , getattr(lowerCamelCase , lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-feature-extractor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
lowerCamelCase , repo_id="""test-feature-extractor""" , push_to_hub=lowerCamelCase , use_auth_token=self._token )
lowerCAmelCase_ : Any = WavaVecaFeatureExtractor.from_pretrained(F'{USER}/test-feature-extractor' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowerCamelCase , getattr(lowerCamelCase , lowerCamelCase ) )
def __lowercase ( self : Optional[Any] ) -> int:
lowerCAmelCase_ : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained(lowerCamelCase )
feature_extractor.push_to_hub("""valid_org/test-feature-extractor""" , use_auth_token=self._token )
lowerCAmelCase_ : Optional[int] = WavaVecaFeatureExtractor.from_pretrained("""valid_org/test-feature-extractor""" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowerCamelCase , getattr(lowerCamelCase , lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-feature-extractor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
lowerCamelCase , repo_id="""valid_org/test-feature-extractor-org""" , push_to_hub=lowerCamelCase , use_auth_token=self._token )
lowerCAmelCase_ : Optional[int] = WavaVecaFeatureExtractor.from_pretrained("""valid_org/test-feature-extractor-org""" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowerCamelCase , getattr(lowerCamelCase , lowerCamelCase ) )
def __lowercase ( self : Optional[Any] ) -> Any:
CustomFeatureExtractor.register_for_auto_class()
lowerCAmelCase_ : Dict = CustomFeatureExtractor.from_pretrained(lowerCamelCase )
feature_extractor.push_to_hub("""test-dynamic-feature-extractor""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {"""AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor"""} , )
lowerCAmelCase_ : Union[str, Any] = AutoFeatureExtractor.from_pretrained(
F'{USER}/test-dynamic-feature-extractor' , trust_remote_code=lowerCamelCase )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , """CustomFeatureExtractor""" )
| 120 | 1 |
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class MCTCTProcessor(ProcessorMixin):
    """simple docstring"""
    feature_extractor_class = 'MCTCTFeatureExtractor'
    tokenizer_class = 'AutoTokenizer'
    def __init__(self, feature_extractor, tokenizer):
        """simple docstring"""
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__(self, *args, **kwargs):
        """simple docstring"""
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
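    # Typical use (argument values illustrative): processor(audio=waveform, sampling_rate=16_000,
    # text="transcript", return_tensors="pt") returns the audio features with aligned "labels".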
    def batch_decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def pad(self, *args, **kwargs):
        """simple docstring"""
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """simple docstring"""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call." )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 360 |
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'google/efficientnet-b7': 'https://huggingface.co/google/efficientnet-b7/resolve/main/config.json',
}
class EfficientNetConfig(PretrainedConfig):
    """simple docstring"""
    model_type = '''efficientnet'''

    def __init__(self, num_channels: int = 3, image_size: int = 600, width_coefficient: float = 2.0, depth_coefficient: float = 3.1, depth_divisor: int = 8, kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3], in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192], out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320], depthwise_padding: List[int] = [], strides: List[int] = [1, 2, 2, 2, 1, 2, 1], num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1], expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio: float = 0.25, hidden_act: str = "swish", hidden_dim: int = 2560, pooling_type: str = "mean", initializer_range: float = 0.02, batch_norm_eps: float = 0.001, batch_norm_momentum: float = 0.99, dropout_rate: float = 0.5, drop_connect_rate: float = 0.2, **kwargs, ):
        """simple docstring"""
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
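        # e.g. with the default num_block_repeats [1, 2, 2, 3, 3, 4, 1]:
        # sum(...) = 16 blocks, so num_hidden_layers = 16 * 4 = 64.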
class EfficientNetOnnxConfig(OnnxConfig):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse('''1.11''')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        """simple docstring"""
        return 1e-5
| 111 | 0 |
'''simple docstring'''
from PIL import Image
def mean_threshold(image: Image) -> Image:
    '''simple docstring'''
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
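# Effect sketch: this is a simple global binarization - pixels brighter than the
# image's mean grey level become 255 (white), everything else becomes 0 (black).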
if __name__ == "__main__":
    image = mean_threshold(Image.open('''path_to_image''').convert('''L'''))
image.save('''output_image_path''')
| 79 |
'''simple docstring'''
from PIL import Image
def change_contrast(img: Image, level: int) -> Image:
    '''simple docstring'''
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))

    return img.point(contrast)
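# Worked example: level=170 gives factor = (259 * 425) / (255 * 89), about 4.85, so
# values near 128 barely move while values far from 128 are pushed toward 0 or 255.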
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change contrast to 170
        cont_img = change_contrast(img, 1_70)
cont_img.save('''image_data/lena_high_contrast.png''', format='''png''')
| 79 | 1 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
_snake_case = "https://openaipublic.azureedge.net/jukebox/models/"
_snake_case = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")
    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")
    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key
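# Worked examples for replace_key (keys illustrative):
#   "prior.x_out.weight" -> "prior.fc_proj_out.weight"
#   "decoder.ln.weight"  -> "decoder.layer_norm.weight"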
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)")
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)")
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)")
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'
            resnet_block = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f'encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'
            resnet_block = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f'decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f'conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f'conditioner_blocks.upsampler.upsample_block.{block_index}.'
            resnet_block = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f'conditioner_blocks.upsampler.proj_in.{groups[-1]}'
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f'{key_prefix}.{key}' not in model_state_dict or key is None:
            print(f'failed converting {original_key} to {key}, does not match')

        # handle mismatched shape
        elif value.shape != model_state_dict[f'{key_prefix}.{key}'].shape:
            val = model_state_dict[f'{key_prefix}.{key}']
            print(f'{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match')
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}'):
            r = requests.get(f'{PREFIX}{file}', allow_redirects=True)
            os.makedirs(f'{pytorch_dump_folder_path}/', exist_ok=True)
            open(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}', "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f'{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}')["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f'priors.{3 - i}'
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f'{pytorch_dump_folder_path}/mapping.json', "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f'Saving model {model_name} to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
_snake_case = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 343 |
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
# Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
# for compatibility with later versions of NVIDIA Megatron-LM.
# The inverse operation is performed inside Megatron-LM to read checkpoints:
# https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
# If param is the weight tensor of the self-attention block, the returned tensor
# will have to be transposed one more time to be read by HuggingFace GPT2.
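    # Illustrative (assumed sizes): with num_splits=3, num_heads=16, hidden_size=64 and a
    # checkpoint_version >= 2.0 weight of shape [16*3*64, D], the view/transpose below turns
    # [heads, splits, hidden, D] into [splits, heads, hidden, D] before flattening back.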
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
def convert_megatron_checkpoint(args, input_state_dict, config):
# The converted output model.
_A : Any = {}
# old versions did not store training args
_A : str = input_state_dict.get("""args""",snake_case_ )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
_A : Union[str, Any] = ds_args.padded_vocab_size
_A : List[Any] = ds_args.max_position_embeddings
_A : Optional[int] = ds_args.hidden_size
_A : List[Any] = ds_args.num_layers
_A : List[str] = ds_args.num_attention_heads
_A : int = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
_A : Union[str, Any] = config.n_head
# The hidden_size per head.
_A : List[Any] = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
_A : Tuple = input_state_dict["""checkpoint_version"""]
else:
_A : Any = 0.0
# The model.
_A : Any = input_state_dict["""model"""]
# The language model.
_A : Tuple = model["""language_model"""]
# The embeddings.
_A : Any = lm["""embedding"""]
# The word embeddings.
_A : Dict = embeddings["""word_embeddings"""]["""weight"""]
# Truncate the embedding table to vocab_size rows.
_A : Union[str, Any] = word_embeddings[: config.vocab_size, :]
_A : Tuple = word_embeddings
# The position embeddings.
_A : Tuple = embeddings["""position_embeddings"""]["""weight"""]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
_A : Any = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
f'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''' )
# Store the position embeddings.
_A : Optional[int] = pos_embeddings
# The transformer.
_A : Any = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""]
# The regex to extract layer names.
_A : Optional[int] = re.compile(r"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" )
# The simple map of names for "automated" rules.
_A : Union[str, Any] = {
"""attention.dense""": """.attn.c_proj.""",
"""self_attention.dense""": """.attn.c_proj.""",
"""mlp.dense_h_to_4h""": """.mlp.c_fc.""",
"""mlp.dense_4h_to_h""": """.mlp.c_proj.""",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
_A : List[str] = layer_re.match(snake_case_ )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
_A : Tuple = int(m.group(1 ) )
# The name of the operation.
_A : Optional[Any] = m.group(2 )
# Is it a weight or a bias?
_A : Dict = m.group(3 )
# The name of the layer.
_A : Optional[Any] = f'''transformer.h.{layer_idx}'''
# For layernorm(s), simply store the layer norm.
if op_name.endswith("""layernorm""" ):
_A : Union[str, Any] = """ln_1""" if op_name.startswith("""input""" ) else """ln_2"""
_A : List[str] = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
_A : List[str] = torch.tril(torch.ones((n_positions, n_positions),dtype=torch.floataa ) ).view(
1,1,snake_case_,snake_case_ )
_A : Any = causal_mask
# Insert a "dummy" tensor for masked_bias.
_A : List[str] = torch.tensor(-1e4,dtype=torch.floataa )
_A : Tuple = masked_bias
_A : Tuple = fix_query_key_value_ordering(snake_case_,snake_case_,3,snake_case_,snake_case_ )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
_A : Tuple = out_val.transpose(0,1 ).contiguous()
# Store.
_A : Any = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
_A : List[str] = fix_query_key_value_ordering(snake_case_,snake_case_,3,snake_case_,snake_case_ )
# Store. No change of shape.
_A : Tuple = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
_A : List[str] = megatron_to_transformers[op_name]
_A : Any = val.transpose(0,1 )
# Copy the bias.
elif weight_or_bias == "bias":
_A : Dict = megatron_to_transformers[op_name]
_A : List[Any] = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
_A : Optional[Any] = transformer["""final_layernorm.weight"""]
_A : Dict = transformer["""final_layernorm.bias"""]
# For LM head, transformers' wants the matrix to weight embeddings.
_A : List[str] = word_embeddings
# It should be done!
return output_state_dict
def main():
# Create the argument parser.
_A : Any = argparse.ArgumentParser()
parser.add_argument("""--print-checkpoint-structure""",action="""store_true""" )
parser.add_argument(
"""path_to_checkpoint""",type=snake_case_,help="""Path to the checkpoint file (.zip archive or direct .pt file)""",)
parser.add_argument(
"""--config_file""",default="""""",type=snake_case_,help="""An optional config json file describing the pre-trained model.""",)
_A : Optional[int] = parser.parse_args()
# Extract the basename.
_A : Any = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(f'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''' )
if args.path_to_checkpoint.endswith(""".zip""" ):
with zipfile.ZipFile(args.path_to_checkpoint,"""r""" ) as checkpoint:
with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict:
_A : Tuple = torch.load(snake_case_,map_location="""cpu""" )
else:
_A : Tuple = torch.load(args.path_to_checkpoint,map_location="""cpu""" )
_A : Optional[Any] = input_state_dict.get("""args""",snake_case_ )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
_A : Union[str, Any] = """gelu_fast"""
elif ds_args.openai_gelu:
_A : int = """gelu_new"""
else:
_A : Optional[Any] = """gelu"""
else:
# in the very early days this used to be "gelu_new"
_A : Any = """gelu_new"""
# Spell out all parameters in case the defaults change.
_A : Any = GPTaConfig(
vocab_size=50257,n_positions=1024,n_embd=1024,n_layer=24,n_head=16,n_inner=4096,activation_function=snake_case_,resid_pdrop=0.1,embd_pdrop=0.1,attn_pdrop=0.1,layer_norm_epsilon=1e-5,initializer_range=0.02,summary_type="""cls_index""",summary_use_proj=snake_case_,summary_activation=snake_case_,summary_proj_to_labels=snake_case_,summary_first_dropout=0.1,scale_attn_weights=snake_case_,use_cache=snake_case_,bos_token_id=50256,eos_token_id=50256,)
else:
_A : Union[str, Any] = GPTaConfig.from_json_file(args.config_file )
_A : List[str] = ["""GPT2LMHeadModel"""]
# Convert.
print("""Converting""" )
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)
    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class
# Store the config to file.
print("""Saving config""" )
    config.save_pretrained(basename)
# Save tokenizer based on args
print(f'''Adding {tokenizer_class} tokenizer files''' )
    tokenizer.save_pretrained(basename)
    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'''Saving checkpoint to "{output_checkpoint_file}"''')
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
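# --- Usage note (illustrative; the flags are the ones defined in main() above,
# the script file name is an assumption) ---
#   python convert_megatron_gpt2_checkpoint.py --print-checkpoint-structure \
#       /path/to/checkpoint.zip
# Without --config_file, the GPT-2 hyper-parameters spelled out above
# (24 layers, n_embd=1024) are used.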
| 343 | 1 |
from manim import *
class _snake_case ( Scene ):
    def construct(self) -> None:
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)
        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)
        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)
        cpu_targs = []
        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)
            # target = fill.copy().set_fill(YELLOW, opacity=0.7)
            # target.move_to(rect)
            # self.add(target)
            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(cpu_targs[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(cpu_targs[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            cpu_targs.append(cpu_target)
        checkpoint_base = [mem.copy() for i in range(6)]
        checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT, buff=0)
        checkpoint_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(checkpoint_rect, checkpoint_text).arrange(DOWN, aligned_edge=DOWN, buff=0.4)
        checkpoint.move_to([3, 0.5, 0])
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''', font_size=18, )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)
        blue_text = MarkupText(
            f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''', font_size=18, )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        step_5 = MarkupText(
            f'''Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.''', font_size=24, )
        step_5.move_to([2, 2, 0])
        self.play(Write(step_5), Write(blue_text))
        self.play(Write(checkpoint_text, run_time=1), Create(checkpoint_rect, run_time=1))
        first_animations = []
        second_animations = []
        for i, rect in enumerate(checkpoint_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            first_animations.append(GrowFromCenter(target, run_time=1))
            cpu_target = target.copy()
            cpu_target.generate_target()
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5])
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))
        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
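# --- Usage note (illustrative, not part of the original snippet) ---
# A Manim scene like the one above is rendered from the command line, e.g.
# (the file name is an assumption; the class name matches the one above):
#   manim -pql checkpoint_animation.py _snake_case
# -p previews the result when done, -ql renders at low quality for iteration.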
| 157 | import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))
def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    return vector * sigmoid(vector)
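# --- Illustrative values (added; computed from the definitions above) ---
# sigmoid(np.array([0.0, 1.0]))             -> array([0.5       , 0.73105858])
# sigmoid_linear_unit(np.array([0.0, 1.0])) -> array([0.        , 0.73105858])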
if __name__ == "__main__":
import doctest
doctest.testmod()
| 157 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"
    def __init__(self, vocab_size=100_000, hidden_size=4_096, intermediate_size=11_008, num_hidden_layers=32, num_attention_heads=32, hidden_act="silu", max_position_embeddings=2_048, initializer_range=0.02, rms_norm_eps=1e-6, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, tie_word_embeddings=False, use_memory_efficient_attention=True, hidden_dropout_prob=0.1, attention_dropout_prob=0.1, use_stable_embedding=True, shared_input_output_embedding=True, rope_scaling=None, **kwargs, ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # the misspelled "memorry" key is kept on purpose: it matches the kwarg name used upstream
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention)
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, )
    def _rope_scaling_validation(self):
if self.rope_scaling is None:
return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}")
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
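# --- Illustrative usage (added; keys follow the validation above) ---
# cfg = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # valid
# cfg.rope_scaling = {"type": "dynamic", "factor": 0.5}
# cfg._rope_scaling_validation()  # raises ValueError: factor must be a float > 1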
| 129 |
'''simple docstring'''
from torch import nn
def get_activation(act_fn: str) -> nn.Module:
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(f'''Unsupported activation function: {act_fn}''' )
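# --- Illustrative usage (added; requires `import torch`) ---
# act = get_activation("silu")
# y = act(torch.randn(4))     # elementwise x * sigmoid(x)
# get_activation("tanh")      # raises ValueError: unsupported activation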
| 129 | 1 |
'''simple docstring'''
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"
__UpperCAmelCase = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class MgpstrProcessor(ProcessorMixin):
    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """simple docstring"""
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")
        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, sequences):
        """simple docstring"""
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)
        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")
        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])
        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out
    def _decode_helper(self, pred_logits, format):
        """simple docstring"""
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")
        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]
        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)
        return dec_strs, conf_scores
    def char_decode(self, sequences):
        """simple docstring"""
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs
    def bpe_decode(self, sequences):
        """simple docstring"""
        return self.bpe_tokenizer.batch_decode(sequences)
    def wp_decode(self, sequences):
        """simple docstring"""
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
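# --- Illustrative usage (added; the repo id and the `model` object are assumptions) ---
# processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
# pixel_values = processor(images=image, return_tensors="pt").pixel_values
# outputs = model(pixel_values)  # a scene-text-recognition forward pass
# text = processor.batch_decode(outputs.logits)["generated_text"]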
| 67 | '''simple docstring'''
def solution(limit: int = 1_00_00_00) -> int:
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))
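# --- Illustrative check (added): for limit=8 the answer is 21, the number of
# reduced proper fractions n/d with d <= 8 (Project Euler 72), since
# phi(2)+...+phi(8) = 1+2+2+4+2+6+4 = 21.
# assert solution(8) == 21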
if __name__ == "__main__":
print(f'{solution() = }')
| 67 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 350 | '''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
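# --- Usage note (illustrative; the file name is an assumption) ---
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path model.ckpt --config_file config.json \
#       --pytorch_dump_path pytorch_model.bin [--base_model]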
| 21 | 0 |
import requests
APPID = ""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = "https://api.openweathermap.org/data/2.5/"
def snake_case ( snake_case__ :str = "Chicago" , snake_case__ :str = APPID) -> dict:
return requests.get(URL_BASE + """weather""" , params=locals()).json()
def snake_case ( snake_case__ :str = "Kolkata, India" , snake_case__ :str = APPID) -> dict:
return requests.get(URL_BASE + """forecast""" , params=locals()).json()
def snake_case ( snake_case__ :float = 55.68 , snake_case__ :float = 12.57 , snake_case__ :str = APPID) -> dict:
return requests.get(URL_BASE + """onecall""" , params=locals()).json()
if __name__ == "__main__":
from pprint import pprint
while True:
        location = input("Enter a location:").strip()
if location:
pprint(current_weather(location))
else:
break
| 180 | import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    """simple docstring"""
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()
        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)
                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))
        self.keep_order = keep_order
    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            # logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            # if bias is not None:
            # logit = logit + bias
        return logit
    def forward(self, hidden, labels=None, keep_order=False):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
                weights.append(weight_i)
                biases.append(bias_i)
            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)
            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden
                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i
                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)
        return out
    def log_prob(self, hidden):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
                weights.append(weight_i)
                biases.append(bias_i)
            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]
                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i
            return out
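# --- Illustrative usage (added; the sizes are arbitrary assumptions) ---
# crit = ProjectedAdaptiveLogSoftmax(n_token=10_000, d_embed=64, d_proj=64, cutoffs=[1_000, 4_000])
# hidden = torch.randn(8, 5, 64)            # (batch, seq, d_proj)
# labels = torch.randint(0, 10_000, (8, 5))
# nll = crit(hidden, labels)                # negative log-likelihoods of the shifted targets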
| 180 | 1 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
    '''simple docstring'''
    def __init__(self, parent, batch_size=2, is_training=True, use_auxiliary_loss=False, num_queries=10, num_channels=3, min_size=32 * 4, max_size=32 * 6, num_labels=4, mask_feature_size=32, ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device)
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config(self):
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)
    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()
            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape, (self.batch_size, self.num_queries, self.mask_feature_size), )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)
        if output_hidden_states:
            self.check_output_hidden_state(output, config)
    def create_and_check_maskformer_instance_segmentation_head_model(self, config, pixel_values, pixel_mask, mask_labels, class_labels):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape, (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4), )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1))
        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)
            comm_check_on_output(result)
            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels)
            comm_check_on_output(result)
        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)
    def test_config(self):
self.config_tester.run_common_tests()
    def test_maskformer_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs_dict, output_hidden_states=False)
    def test_maskformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs)
@unittest.skip(reason='MaskFormer does not use inputs_embeds' )
    def test_inputs_embeds(self):
pass
@unittest.skip(reason='MaskFormer does not have a get_input_embeddings method' )
    def test_model_common_attributes(self):
pass
@unittest.skip(reason='MaskFormer is not a generative model' )
    def test_generate_without_input_ids(self):
pass
@unittest.skip(reason='MaskFormer does not use token embeddings' )
    def test_resize_tokens_embeddings(self):
pass
@require_torch_multi_gpu
@unittest.skip(
reason='MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
    def test_multi_gpu_data_parallel_forward(self):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def test_model_outputs_equivalence(self):
pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
@slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        model = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)
    def test_output_hidden_state(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs_dict, output_hidden_states=True)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)
    def test_training(self):
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        model = model_class(config)
        model.to(torch_device)
        model.train()
        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()
    def test_retain_grad_hidden_states_attentions(self):
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True
        model = model_class(config)
        model.to(torch_device)
        model.train()
        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)
        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        attentions = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=True)
        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4
def prepare_img():
    '''simple docstring'''
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@cached_property
    def default_image_processor(self):
        return (
            MaskFormerImageProcessor.from_pretrained('facebook/maskformer-swin-small-coco')
            if is_vision_available()
            else None
        )
    def test_inference_no_head(self):
        model = MaskFormerModel.from_pretrained('facebook/maskformer-swin-small-coco').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors='pt').to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))
        with torch.no_grad():
            outputs = model(**inputs)
        expected_slice = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]]).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        expected_slice = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]]).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        expected_slice = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]]).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_inference_instance_segmentation_head(self):
UpperCAmelCase_ : Tuple = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco' )
.to(_a )
.eval()
)
UpperCAmelCase_ : List[str] = self.default_image_processor
UpperCAmelCase_ : Optional[int] = prepare_img()
UpperCAmelCase_ : List[Any] = image_processor(_a , return_tensors='pt' ).to(_a )
UpperCAmelCase_ : Tuple = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(_a , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
UpperCAmelCase_ : str = model(**_a )
# masks_queries_logits
UpperCAmelCase_ : Any = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
UpperCAmelCase_ : str = [
[-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33],
[-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95],
[-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42],
]
UpperCAmelCase_ : Optional[int] = torch.tensor(_a ).to(_a )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _a , atol=_a ) )
# class_queries_logits
UpperCAmelCase_ : Tuple = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
UpperCAmelCase_ : List[str] = torch.tensor(
[
[1.6_512E00, -5.2_572E00, -3.3_519E00],
[3.6_169E-02, -5.9_025E00, -2.9_313E00],
[1.0_766E-04, -7.7_630E00, -5.1_263E00],
] ).to(_a )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _a , atol=_a ) )
    def test_inference_instance_segmentation_head_resnet_backbone(self):
UpperCAmelCase_ : List[Any] = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-resnet101-coco-stuff' )
.to(_a )
.eval()
)
UpperCAmelCase_ : Any = self.default_image_processor
UpperCAmelCase_ : Tuple = prepare_img()
UpperCAmelCase_ : int = image_processor(_a , return_tensors='pt' ).to(_a )
UpperCAmelCase_ : Any = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(_a , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
UpperCAmelCase_ : str = model(**_a )
# masks_queries_logits
UpperCAmelCase_ : Union[str, Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
UpperCAmelCase_ : str = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]]
UpperCAmelCase_ : Tuple = torch.tensor(_a ).to(_a )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _a , atol=_a ) )
# class_queries_logits
UpperCAmelCase_ : Optional[int] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
UpperCAmelCase_ : Dict = torch.tensor(
[[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(_a )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _a , atol=_a ) )
    def test_with_segmentation_maps_and_loss(self):
UpperCAmelCase_ : List[Any] = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco' )
.to(_a )
.eval()
)
UpperCAmelCase_ : List[Any] = self.default_image_processor
UpperCAmelCase_ : Optional[Any] = image_processor(
[np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors='pt' , )
UpperCAmelCase_ : Union[str, Any] = inputs["""pixel_values"""].to(_a )
UpperCAmelCase_ : Tuple = [el.to(_a ) for el in inputs["""mask_labels"""]]
UpperCAmelCase_ : Tuple = [el.to(_a ) for el in inputs["""class_labels"""]]
with torch.no_grad():
UpperCAmelCase_ : Optional[int] = model(**_a )
self.assertTrue(outputs.loss is not None )
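# --- Usage note (illustrative; the test path is an assumption) ---
# These tests follow the transformers test-suite conventions; the @slow
# integration tests only run when RUN_SLOW is set:
#   RUN_SLOW=1 pytest tests/models/maskformer/test_modeling_maskformer.py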
| 362 |
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
usage_doc = 'Usage of script: script_name <size_of_canvas:int>'
choice = [0] * 100 + [1] * 10
random.shuffle(choice)
def create_canvas(size: int) -> list[list[bool]]:
    '''simple docstring'''
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas
def seed(canvas: list[list[bool]]) -> None:
    '''simple docstring'''
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))
def run(canvas: list[list[bool]]) -> list[list[bool]]:
    '''simple docstring'''
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2])
    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas: list[list[bool]] = current_canvas.tolist()
    return return_canvas
def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    '''simple docstring'''
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1
    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1
    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True
    return state
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(['w', 'k'])
try:
while True:
            c = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
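# --- Usage note (illustrative; the file name is an assumption) ---
#   python game_of_life.py 25
# creates a random 25x25 board and animates generations until Ctrl+C
# (the canvas-size argument is required, as enforced above).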
| 145 | 0 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training')
# TF training parameters
USE_XLA = False
USE_AMP = False
def train_command_factory(args: Namespace):
    return TrainCommand(args)
class TrainCommand(BaseTransformersCLICommand):
    '''simple docstring'''
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data", type=str, required=True, help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.", )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels.")
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts.")
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids.")
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers).")
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split", type=float, default=0.1, help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.", )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on.")
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model.")
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)
    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")
        self.framework = "tf" if is_tf_available() else "torch"
        os.makedirs(args.output, exist_ok=True)
        self.output = args.output
        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id
        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data, column_label=args.column_label, column_text=args.column_text, column_id=args.column_id, skip_first_row=args.skip_first_row, )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data, column_label=args.column_label, column_text=args.column_text, column_id=args.column_id, skip_first_row=args.skip_first_row, )
        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon
    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()
    def run_torch(self):
        raise NotImplementedError
    def run_tf(self):
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
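# --- Usage note (illustrative) ---
# Registered through `register_subcommand`, this runs as, e.g.:
#   transformers-cli train --train_data train.csv --task text_classification \
#       --model bert-base-uncased --output ./out
# Only the TensorFlow path is implemented here; run_torch raises NotImplementedError.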
| 48 |
'''simple docstring'''
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def lowerCamelCase__ ( _A = "laptop" ):
a : Any = f"""https://www.amazon.in/laptop/s?k={product}"""
a : Tuple = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36',
'Accept-Language': 'en-US, en;q=0.5',
}
a : Any = BeautifulSoup(requests.get(_A , headers=_A ).text )
# Initialize a Pandas dataframe with the column titles
a : Any = DataFrame(
columns=[
'Product Title',
'Product Link',
'Current Price of the product',
'Product Rating',
'MRP of the product',
'Discount',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'div' , attrs={'class': 's-result-item', 'data-component-type': 's-search-result'} , ) , soup.find_all('div' , attrs={'class': 'a-row a-size-base a-color-base'} ) , ):
try:
            product_title = item.h2.text
            product_link = 'https://www.amazon.in/' + item.h2.a['href']
            product_price = item.find('span', attrs={'class': 'a-offscreen'}).text
            try:
                product_rating = item.find('span', attrs={'class': 'a-icon-alt'}).text
            except AttributeError:
                product_rating = 'Not available'
            try:
                product_mrp = (
                    '₹'
                    + item.find(
                        'span', attrs={'class': 'a-price a-text-price'}).text.split('₹')[1]
                )
            except AttributeError:
                product_mrp = ''
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip('₹').replace(',', ''))
                            - float(product_price.strip('₹').replace(',', ''))
                        )
                        / float(product_mrp.strip('₹').replace(',', ''))
                    )
                    * 100)
            except ValueError:
                discount = float('nan')
except AttributeError:
pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
    data_frame.loc[
        data_frame["Current Price of the product"] > data_frame["MRP of the product"], "MRP of the product"] = " "
    data_frame.loc[
        data_frame["Current Price of the product"] > data_frame["MRP of the product"], "Current Price of the product"] = " "
data_frame.index += 1
return data_frame
if __name__ == "__main__":
    product = 'headphones'
get_amazon_product_data(product).to_csv(F"Amazon Product Data for {product}.csv") | 297 | 0 |
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()
        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs), )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
# Now we train the model
for epoch in range(UpperCAmelCase__ ):
model.train()
for step, batch in enumerate(UpperCAmelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowercase_ : str = model(**UpperCAmelCase__ )
lowercase_ : Tuple = outputs.loss
accelerator.backward(UpperCAmelCase__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(UpperCAmelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase_ : Tuple = model(**UpperCAmelCase__ )
lowercase_ : int = outputs.logits.argmax(dim=-1 )
lowercase_ , lowercase_ : List[str] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=UpperCAmelCase__ , references=UpperCAmelCase__ , )
lowercase_ : List[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , UpperCAmelCase__ )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def lowerCamelCase ( ) -> int:
lowercase_ : List[Any] = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=UpperCAmelCase__ , default=UpperCAmelCase__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
lowercase_ : int = parser.parse_args()
lowercase_ : Dict = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(UpperCAmelCase__ , UpperCAmelCase__ )
if __name__ == "__main__":
main()
| 21 | '''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase ( UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[str] ) -> List[Any]:
# Initialise PyTorch model
lowercase_ : List[str] = FunnelConfig.from_json_file(UpperCAmelCase__ )
print(F'''Building PyTorch model from configuration: {config}''' )
lowercase_ : Dict = FunnelBaseModel(UpperCAmelCase__ ) if base_model else FunnelModel(UpperCAmelCase__ )
# Load weights from tf checkpoint
load_tf_weights_in_funnel(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , UpperCAmelCase__ )
if __name__ == "__main__":
_lowercase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
_lowercase : Union[str, Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
| 21 | 1 |
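# A hedged sketch (not accelerate's actual implementation) of the retry
# pattern behind the `find_executable_batch_size` decorator used on the inner
# training loop earlier in this row: run the wrapped function with the
# starting batch size, and on an out-of-memory error halve it and retry.
import functools

def find_executable_batch_size_sketch(starting_batch_size=128):
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            batch_size = starting_batch_size
            while batch_size > 0:
                try:
                    return fn(batch_size, *args, **kwargs)
                # the real helper matches CUDA out-of-memory errors specifically
                except RuntimeError as err:
                    if "out of memory" not in str(err):
                        raise
                    batch_size //= 2
            raise RuntimeError("No executable batch size found, reached zero.")
        return wrapper
    return decorator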
'''simple docstring'''
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class lowercase_ ( a__ , a__ ):
@register_to_config
def __init__( self , a = 1_28 , a = 2_56 , a = 2000.0 , a = 7_68 , a = 12 , a = 12 , a = 64 , a = 20_48 , a = 0.1 , ):
super().__init__()
UpperCamelCase__ = nn.Sequential(
nn.Linear(a , d_model * 4 , bias=a ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=a ) , nn.SiLU() , )
UpperCamelCase__ = nn.Embedding(a , a )
UpperCamelCase__ = False
UpperCamelCase__ = nn.Linear(a , a , bias=a )
UpperCamelCase__ = nn.Dropout(p=a )
UpperCamelCase__ = nn.ModuleList()
for lyr_num in range(a ):
# FiLM conditional T5 decoder
UpperCamelCase__ = DecoderLayer(d_model=a , d_kv=a , num_heads=a , d_ff=a , dropout_rate=a )
self.decoders.append(a )
UpperCamelCase__ = TaLayerNorm(a )
UpperCamelCase__ = nn.Dropout(p=a )
UpperCamelCase__ = nn.Linear(a , a , bias=a )
def __a ( self , a , a ):
UpperCamelCase__ = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def __a ( self , a , a , a ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
UpperCamelCase__ = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
UpperCamelCase__ = self.conditioning_emb(a ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
UpperCamelCase__ = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
UpperCamelCase__ = torch.broadcast_to(
torch.arange(a , device=decoder_input_tokens.device ) , (batch, seq_length) , )
UpperCamelCase__ = self.position_encoding(a )
UpperCamelCase__ = self.continuous_inputs_projection(a )
inputs += position_encodings
UpperCamelCase__ = self.dropout(a )
# decoder: No padding present.
UpperCamelCase__ = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
UpperCamelCase__ = [(x, self.encoder_decoder_mask(a , a )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
UpperCamelCase__ = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
UpperCamelCase__ = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
UpperCamelCase__ = lyr(
a , conditioning_emb=a , encoder_hidden_states=a , encoder_attention_mask=a , )[0]
UpperCamelCase__ = self.decoder_norm(a )
UpperCamelCase__ = self.post_dropout(a )
UpperCamelCase__ = self.spec_out(a )
return spec_out
class lowercase_ ( nn.Module ):
def __init__( self , a , a , a , a , a , a=1e-6 ):
super().__init__()
UpperCamelCase__ = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=a , d_kv=a , num_heads=a , dropout_rate=a ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=a , d_kv=a , num_heads=a , dropout_rate=a , layer_norm_epsilon=a , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=a , d_ff=a , dropout_rate=a , layer_norm_epsilon=a ) )
def __a ( self , a , a=None , a=None , a=None , a=None , a=None , ):
UpperCamelCase__ = self.layer[0](
a , conditioning_emb=a , attention_mask=a , )
if encoder_hidden_states is not None:
UpperCamelCase__ = torch.where(encoder_attention_mask > 0 , 0 , -1e10 ).to(
encoder_hidden_states.dtype )
UpperCamelCase__ = self.layer[1](
a , key_value_states=a , attention_mask=a , )
# Apply Film Conditional Feed Forward layer
UpperCamelCase__ = self.layer[-1](a , a )
return (hidden_states,)
class lowercase_ ( nn.Module ):
def __init__( self , a , a , a , a ):
super().__init__()
UpperCamelCase__ = TaLayerNorm(a )
UpperCamelCase__ = TaFiLMLayer(in_features=d_model * 4 , out_features=a )
UpperCamelCase__ = Attention(query_dim=a , heads=a , dim_head=a , out_bias=a , scale_qk=a )
UpperCamelCase__ = nn.Dropout(a )
def __a ( self , a , a=None , a=None , ):
# pre_self_attention_layer_norm
UpperCamelCase__ = self.layer_norm(a )
if conditioning_emb is not None:
UpperCamelCase__ = self.FiLMLayer(a , a )
# Self-attention block
UpperCamelCase__ = self.attention(a )
UpperCamelCase__ = hidden_states + self.dropout(a )
return hidden_states
class lowercase_ ( nn.Module ):
def __init__( self , a , a , a , a , a ):
super().__init__()
UpperCamelCase__ = Attention(query_dim=a , heads=a , dim_head=a , out_bias=a , scale_qk=a )
UpperCamelCase__ = TaLayerNorm(a , eps=a )
UpperCamelCase__ = nn.Dropout(a )
def __a ( self , a , a=None , a=None , ):
UpperCamelCase__ = self.layer_norm(a )
UpperCamelCase__ = self.attention(
a , encoder_hidden_states=a , attention_mask=attention_mask.squeeze(1 ) , )
UpperCamelCase__ = hidden_states + self.dropout(a )
return layer_output
class lowercase_ ( nn.Module ):
def __init__( self , a , a , a , a ):
super().__init__()
UpperCamelCase__ = TaDenseGatedActDense(d_model=a , d_ff=a , dropout_rate=a )
UpperCamelCase__ = TaFiLMLayer(in_features=d_model * 4 , out_features=a )
UpperCamelCase__ = TaLayerNorm(a , eps=a )
UpperCamelCase__ = nn.Dropout(a )
def __a ( self , a , a=None ):
UpperCamelCase__ = self.layer_norm(a )
if conditioning_emb is not None:
UpperCamelCase__ = self.film(a , a )
UpperCamelCase__ = self.DenseReluDense(a )
UpperCamelCase__ = hidden_states + self.dropout(a )
return hidden_states
class lowercase_ ( nn.Module ):
def __init__( self , a , a , a ):
super().__init__()
UpperCamelCase__ = nn.Linear(a , a , bias=a )
UpperCamelCase__ = nn.Linear(a , a , bias=a )
UpperCamelCase__ = nn.Linear(a , a , bias=a )
UpperCamelCase__ = nn.Dropout(a )
UpperCamelCase__ = NewGELUActivation()
def __a ( self , a ):
UpperCamelCase__ = self.act(self.wi_a(a ) )
UpperCamelCase__ = self.wi_a(a )
UpperCamelCase__ = hidden_gelu * hidden_linear
UpperCamelCase__ = self.dropout(a )
UpperCamelCase__ = self.wo(a )
return hidden_states
class lowercase_ ( nn.Module ):
def __init__( self , a , a=1e-6 ):
super().__init__()
UpperCamelCase__ = nn.Parameter(torch.ones(a ) )
UpperCamelCase__ = eps
def __a ( self , a ):
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
UpperCamelCase__ = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=a )
UpperCamelCase__ = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
UpperCamelCase__ = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class lowercase_ ( nn.Module ):
def __a ( self , a ):
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_4715 * torch.pow(a , 3.0 )) ))
class lowercase_ ( nn.Module ):
def __init__( self , a , a ):
super().__init__()
UpperCamelCase__ = nn.Linear(a , out_features * 2 , bias=a )
def __a ( self , a , a ):
UpperCamelCase__ = self.scale_bias(a )
UpperCamelCase__ , UpperCamelCase__ = torch.chunk(a , 2 , -1 )
UpperCamelCase__ = x * (1 + scale) + shift
return x
| 80 |
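# A minimal, self-contained sketch (illustrative names only) of the FiLM idea
# used throughout the decoder above: a conditioning vector is projected to
# per-channel (scale, shift) pairs and applied as x * (1 + scale) + shift,
# the same modulation the final scale_bias layer performs.
import torch
from torch import nn

class FiLMSketch(nn.Module):
    def __init__(self, cond_dim, num_features):
        super().__init__()
        self.proj = nn.Linear(cond_dim, num_features * 2, bias=False)

    def forward(self, x, cond):
        scale, shift = torch.chunk(self.proj(cond), 2, dim=-1)
        return x * (1 + scale) + shift

film = FiLMSketch(cond_dim=32, num_features=64)
out = film(torch.randn(2, 10, 64), torch.randn(2, 1, 32))  # conditioning broadcasts over the sequence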
def a_ ( lowerCAmelCase_ : str, lowerCAmelCase_ : int ):
return [sentence[i : i + ngram_size] for i in range(len(lowerCAmelCase_ ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 284 | 0 |
"""simple docstring"""
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
SquadVaProcessor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 353 |
"""simple docstring"""
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : List[Any] = logging.get_logger(__name__)
A : Tuple = {
"facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : str ="""data2vec-audio"""
def __init__( self , __a=32 , __a=7_68 , __a=12 , __a=12 , __a=30_72 , __a="gelu" , __a=0.1 , __a=0.1 , __a=0.1 , __a=0.0 , __a=0.1 , __a=0.1 , __a=0.0_2 , __a=1e-5 , __a="gelu" , __a=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , __a=(5, 2, 2, 2, 2, 2, 2) , __a=(10, 3, 3, 3, 3, 2, 2) , __a=False , __a=16 , __a=19 , __a=5 , __a=0.0_5 , __a=10 , __a=2 , __a=0.0 , __a=10 , __a=0 , __a="sum" , __a=False , __a=False , __a=2_56 , __a=(5_12, 5_12, 5_12, 5_12, 15_00) , __a=(5, 3, 3, 1, 1) , __a=(1, 2, 3, 1, 1) , __a=5_12 , __a=0 , __a=1 , __a=2 , __a=False , __a=3 , __a=2 , __a=3 , __a=None , **__a , ):
super().__init__(**__a , pad_token_id=__a , bos_token_id=__a , eos_token_id=__a )
__lowerCAmelCase = hidden_size
__lowerCAmelCase = feat_extract_activation
__lowerCAmelCase = list(__a )
__lowerCAmelCase = list(__a )
__lowerCAmelCase = list(__a )
__lowerCAmelCase = conv_bias
__lowerCAmelCase = num_conv_pos_embeddings
__lowerCAmelCase = num_conv_pos_embedding_groups
__lowerCAmelCase = conv_pos_kernel_size
__lowerCAmelCase = len(self.conv_dim )
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = hidden_dropout
__lowerCAmelCase = attention_dropout
__lowerCAmelCase = activation_dropout
__lowerCAmelCase = feat_proj_dropout
__lowerCAmelCase = final_dropout
__lowerCAmelCase = layerdrop
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = initializer_range
__lowerCAmelCase = vocab_size
__lowerCAmelCase = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
f" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowerCAmelCase = mask_time_prob
__lowerCAmelCase = mask_time_length
__lowerCAmelCase = mask_time_min_masks
__lowerCAmelCase = mask_feature_prob
__lowerCAmelCase = mask_feature_length
__lowerCAmelCase = mask_feature_min_masks
# ctc loss
__lowerCAmelCase = ctc_loss_reduction
__lowerCAmelCase = ctc_zero_infinity
# adapter
__lowerCAmelCase = add_adapter
__lowerCAmelCase = adapter_kernel_size
__lowerCAmelCase = adapter_stride
__lowerCAmelCase = num_adapter_layers
__lowerCAmelCase = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__lowerCAmelCase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__lowerCAmelCase = list(__a )
__lowerCAmelCase = list(__a )
__lowerCAmelCase = list(__a )
__lowerCAmelCase = xvector_output_dim
@property
def snake_case ( self ):
return math.prod(self.conv_stride )
| 259 | 0 |
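# An illustrative helper (the name is an assumption, not part of the config
# class) making the last property above concrete: each 1-D conv maps length
# L to (L - kernel) // stride + 1, so the overall input-to-output ratio is
# roughly the product of the conv strides.
def feature_extract_output_length(input_length,
                                  kernels=(10, 3, 3, 3, 3, 2, 2),
                                  strides=(5, 2, 2, 2, 2, 2, 2)):
    for k, s in zip(kernels, strides):
        input_length = (input_length - k) // s + 1
    return input_length

print(feature_extract_output_length(16_000))  # 49 frames for one second of 16 kHz audio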
'''simple docstring'''
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
return round(float(moles / volume ) * nfactor )
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
return round(float((moles * 0.08_21 * temperature) / (volume) ) )
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
return round(float((moles * 0.08_21 * temperature) / (pressure) ) )
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
return round(float((pressure * volume) / (0.08_21 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 163 |
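# A worked instance of the ideal gas law PV = nRT that the four helpers above
# specialize, with R approximated as 0.0821 L*atm/(mol*K): one mole at 273 K
# in 22.4 L comes out to roughly one atmosphere.
def pressure_from_ideal_gas(moles, temperature, volume):
    return (moles * 0.0821 * temperature) / volume

print(round(pressure_from_ideal_gas(1.0, 273.0, 22.4), 2))  # ~1.0 atm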
'''simple docstring'''
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
__A =namedtuple(
'_TestCommandArgs',
[
'dataset',
'name',
'cache_dir',
'data_dir',
'all_configs',
'save_infos',
'ignore_verifications',
'force_redownload',
'clear_cache',
],
defaults=[None, None, None, False, False, False, False, False],
)
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
return (abs(source - target ) / target) < 0.01
@pytest.mark.integration
def _UpperCamelCase ( UpperCamelCase__ ):
UpperCAmelCase__ : List[Any] = _TestCommandArgs(dataset=UpperCamelCase__ , all_configs=UpperCamelCase__ , save_infos=UpperCamelCase__ )
UpperCAmelCase__ : Any = TestCommand(*UpperCamelCase__ )
test_command.run()
UpperCAmelCase__ : List[str] = os.path.join(UpperCamelCase__ , """README.md""" )
assert os.path.exists(UpperCamelCase__ )
UpperCAmelCase__ : Union[str, Any] = DatasetInfosDict.from_directory(UpperCamelCase__ )
UpperCAmelCase__ : Any = DatasetInfosDict(
{
"""default""": DatasetInfo(
features=Features(
{
"""tokens""": Sequence(Value("""string""" ) ),
"""ner_tags""": Sequence(
ClassLabel(names=["""O""", """B-PER""", """I-PER""", """B-ORG""", """I-ORG""", """B-LOC""", """I-LOC"""] ) ),
"""langs""": Sequence(Value("""string""" ) ),
"""spans""": Sequence(Value("""string""" ) ),
} ) , splits=[
{
"""name""": """train""",
"""num_bytes""": 2_3_5_1_5_6_3,
"""num_examples""": 1_0_0_0_0,
},
{
"""name""": """validation""",
"""num_bytes""": 2_3_8_4_1_8,
"""num_examples""": 1_0_0_0,
},
] , download_size=3_9_4_0_6_8_0 , dataset_size=2_5_8_9_9_8_1 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = getattr(dataset_infos["""default"""] , UpperCamelCase__ ), getattr(expected_dataset_infos["""default"""] , UpperCamelCase__ )
if key == "num_bytes":
assert is_apercent_close(UpperCamelCase__ , UpperCamelCase__ )
elif key == "splits":
assert list(UpperCamelCase__ ) == list(UpperCamelCase__ )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
else:
assert result == expected | 163 | 1 |
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
a_ : Union[str, Any] = logging.get_logger(__name__)
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = nn.ModuleList([src_layers[i] for i in layers_to_copy])
assert len(_UpperCAmelCase) == len(_UpperCAmelCase), F'''{len(_UpperCAmelCase)} != {len(_UpperCAmelCase)}'''
dest_layers.load_state_dict(layers_to_copy.state_dict())
a_ : Union[str, Any] = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
a_ : Optional[Any] = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
try:
SCREAMING_SNAKE_CASE = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
F'''no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first'''
F''' {n_student}''')
return list(range(_UpperCAmelCase))
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
if n_student > n_teacher:
raise ValueError(F'''Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}''')
elif n_teacher == n_student:
return list(range(_UpperCAmelCase))
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase = "student" , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase , ):
SCREAMING_SNAKE_CASE = 'encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.'
assert (e is not None) or (d is not None), _msg
if isinstance(_UpperCAmelCase , _UpperCAmelCase):
AutoTokenizer.from_pretrained(_UpperCAmelCase).save_pretrained(_UpperCAmelCase) # purely for convenience
SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_pretrained(_UpperCAmelCase).eval()
else:
assert isinstance(_UpperCAmelCase , _UpperCAmelCase), F'''teacher must be a model or string got type {type(_UpperCAmelCase)}'''
SCREAMING_SNAKE_CASE = teacher.config.to_diff_dict()
try:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
SCREAMING_SNAKE_CASE = teacher_e
if d is None:
SCREAMING_SNAKE_CASE = teacher_d
init_kwargs.update({'encoder_layers': e, 'decoder_layers': d})
except AttributeError: # T5
if hasattr(teacher.config , 'num_encoder_layers'):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
SCREAMING_SNAKE_CASE = teacher_e
if d is None:
SCREAMING_SNAKE_CASE = teacher_d
if hasattr(teacher.config , 'num_encoder_layers'):
init_kwargs.update({'num_encoder_layers': e, 'num_decoder_layers': d})
else:
init_kwargs.update({'num_layers': e, 'num_decoder_layers': d})
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(_UpperCAmelCase)
# Copy weights
SCREAMING_SNAKE_CASE = teacher.config_class(**_UpperCAmelCase)
SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_config(_UpperCAmelCase)
# Start by copying the full teacher state dict; this will copy the first N teacher layers to the student.
SCREAMING_SNAKE_CASE = student.load_state_dict(teacher.state_dict() , strict=_UpperCAmelCase)
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = list(range(_UpperCAmelCase)), list(range(_UpperCAmelCase))
logger.info(
F'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to'''
F''' {save_path}''')
student.save_pretrained(_UpperCAmelCase)
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
SCREAMING_SNAKE_CASE = pick_layers_to_copy(_UpperCAmelCase , _UpperCAmelCase)
if d_layers_to_copy is None:
SCREAMING_SNAKE_CASE = pick_layers_to_copy(_UpperCAmelCase , _UpperCAmelCase)
try:
if hasattr(
_UpperCAmelCase , 'prophetnet'): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , _UpperCAmelCase)
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , _UpperCAmelCase)
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , _UpperCAmelCase)
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , _UpperCAmelCase)
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , _UpperCAmelCase)
copy_layers(teacher.decoder.block , student.decoder.block , _UpperCAmelCase)
logger.info(
F'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}''')
SCREAMING_SNAKE_CASE = {
'teacher_type': teacher.config.model_type,
'copied_encoder_layers': e_layers_to_copy,
'copied_decoder_layers': d_layers_to_copy,
}
student.save_pretrained(_UpperCAmelCase)
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
| 327 |
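# A hedged sketch of the selection idea behind the hardcoded LAYERS_TO_COPY
# tables above: spread n_student picks evenly across the teacher while
# keeping the first and last layer. This is an even-spacing heuristic, not
# the exact hand-tuned tables (e.g. it yields [0, 4, 7, 11] where the table
# has [0, 4, 8, 11]).
def spread_layers(n_teacher, n_student):
    if n_student == 1:
        return [0]
    step = (n_teacher - 1) / (n_student - 1)
    return sorted({round(i * step) for i in range(n_student)})

print(spread_layers(12, 4))  # [0, 4, 7, 11]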
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a_ : Optional[Any] = {
'configuration_efficientnet': [
'EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientNetConfig',
'EfficientNetOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[str] = ['EfficientNetImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Union[str, Any] = [
'EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientNetForImageClassification',
'EfficientNetModel',
'EfficientNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
a_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 327 | 1 |
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> int:
__lowerCamelCase : List[Any] = hex_num.strip()
if not hex_num:
raise ValueError('No value was passed to the function' )
__lowerCamelCase : List[Any] = hex_num[0] == '-'
if is_negative:
__lowerCamelCase : Dict = hex_num[1:]
try:
__lowerCamelCase : List[str] = int(lowerCamelCase__ , 1_6 )
except ValueError:
raise ValueError('Invalid value was passed to the function' )
__lowerCamelCase : Tuple = ''
while int_num > 0:
__lowerCamelCase : List[Any] = str(int_num % 2 ) + bin_str
int_num >>= 1
return int(('-' + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 73 |
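# What the converter above computes, cross-checked with Python built-ins: it
# returns an int whose decimal digits spell the binary form of the hex input
# (negated when the input starts with '-'), built via repeated % 2 and >>= 1.
value = int("AC", 16)   # 172
bits = bin(value)[2:]   # "10101100"
assert int(bits) == 10101100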
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
__UpperCAmelCase : Any = logging.get_logger(__name__)
__UpperCAmelCase : str = Dict[str, Any]
__UpperCAmelCase : int = List[Prediction]
@add_end_docstrings(__lowerCamelCase )
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
def __init__( self : int , *A : Optional[int] , **A : Optional[int] ):
super().__init__(*A , **A )
if self.framework == "tf":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
requires_backends(self , """vision""" )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def UpperCAmelCase__ ( self : List[str] , **A : Tuple ):
__snake_case: List[str] = {}
if "threshold" in kwargs:
__snake_case: Optional[Any] = kwargs["""threshold"""]
return {}, {}, postprocess_kwargs
def __call__( self : int , *A : Optional[Any] , **A : Tuple ):
return super().__call__(*A , **A )
def UpperCAmelCase__ ( self : Optional[int] , A : str ):
__snake_case: Optional[Any] = load_image(A )
__snake_case: Dict = torch.IntTensor([[image.height, image.width]] )
__snake_case: str = self.image_processor(images=[image] , return_tensors="""pt""" )
if self.tokenizer is not None:
__snake_case: Optional[Any] = self.tokenizer(text=inputs["""words"""] , boxes=inputs["""boxes"""] , return_tensors="""pt""" )
__snake_case: Any = target_size
return inputs
def UpperCAmelCase__ ( self : Optional[int] , A : Dict ):
__snake_case: int = model_inputs.pop("""target_size""" )
__snake_case: int = self.model(**A )
__snake_case: Any = outputs.__class__({"""target_size""": target_size, **outputs} )
if self.tokenizer is not None:
__snake_case: Optional[int] = model_inputs["""bbox"""]
return model_outputs
def UpperCAmelCase__ ( self : List[Any] , A : Optional[int] , A : Union[str, Any]=0.9 ):
__snake_case: Optional[Any] = model_outputs["""target_size"""]
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
__snake_case , __snake_case: Union[str, Any] = target_size[0].tolist()
def unnormalize(A : Tuple ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 1_000),
(height * bbox[1] / 1_000),
(width * bbox[2] / 1_000),
(height * bbox[3] / 1_000),
] ) )
__snake_case , __snake_case: Optional[int] = model_outputs["""logits"""].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
__snake_case: List[Any] = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
__snake_case: int = [unnormalize(A ) for bbox in model_outputs["""bbox"""].squeeze(0 )]
__snake_case: int = ["""score""", """label""", """box"""]
__snake_case: List[Any] = [dict(zip(A , A ) ) for vals in zip(scores.tolist() , A , A ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
__snake_case: Tuple = self.image_processor.post_process_object_detection(A , A , A )
__snake_case: Optional[Any] = raw_annotations[0]
__snake_case: int = raw_annotation["""scores"""]
__snake_case: int = raw_annotation["""labels"""]
__snake_case: Optional[Any] = raw_annotation["""boxes"""]
__snake_case: Union[str, Any] = scores.tolist()
__snake_case: List[str] = [self.model.config.idalabel[label.item()] for label in labels]
__snake_case: List[str] = [self._get_bounding_box(A ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
__snake_case: List[Any] = ["""score""", """label""", """box"""]
__snake_case: Dict = [
dict(zip(A , A ) )
for vals in zip(raw_annotation["""scores"""] , raw_annotation["""labels"""] , raw_annotation["""boxes"""] )
]
return annotation
def UpperCAmelCase__ ( self : Optional[Any] , A : "torch.Tensor" ):
if self.framework != "pt":
raise ValueError("""The ObjectDetectionPipeline is only available in PyTorch.""" )
__snake_case , __snake_case , __snake_case , __snake_case: Union[str, Any] = box.int().tolist()
__snake_case: Optional[Any] = {
"""xmin""": xmin,
"""ymin""": ymin,
"""xmax""": xmax,
"""ymax""": ymax,
}
return bbox
| 111 | 0 |
"""simple docstring"""
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__A : Optional[int] = 'python tqdm regex requests packaging filelock numpy tokenizers'.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('dataclasses')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('importlib_metadata')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def __SCREAMING_SNAKE_CASE ( pkg , lowercase__=None ):
"""simple docstring"""
require_version(deps[pkg] , lowercase__ )
| 57 |
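# A minimal sketch of what a require_version-style check does at run time,
# assuming importlib.metadata (Python 3.8+) and the `packaging` library; the
# real helper also parses other operators and attaches the hint argument.
from importlib.metadata import PackageNotFoundError, version
from packaging.version import parse

def check_min_version(pkg, minimum):
    try:
        installed = version(pkg)
    except PackageNotFoundError as err:
        raise ImportError(f"{pkg} is required but not installed") from err
    if parse(installed) < parse(minimum):
        raise ImportError(f"{pkg}>={minimum} is required, found {installed}")

check_min_version("packaging", "20.0")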
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 57 | 1 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
UpperCAmelCase__ = logging.get_logger(__name__)
@add_end_docstrings(A )
class __lowerCAmelCase ( A ):
def __init__( self : List[str] , *A : Tuple , **A : str) -> Dict:
"""simple docstring"""
super().__init__(*A , **A)
requires_backends(self , 'vision')
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING)
def _lowerCamelCase ( self : Any , A : Optional[Any]=None , A : Union[str, Any]=None , A : Optional[int]=None) -> Dict:
"""simple docstring"""
_UpperCAmelCase = {}
_UpperCAmelCase = {}
if prompt is not None:
_UpperCAmelCase = prompt
if generate_kwargs is not None:
_UpperCAmelCase = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
_UpperCAmelCase = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
'\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'
' please use only one')
_UpperCAmelCase = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self : Optional[Any] , A : Union[str, List[str], "Image.Image", List["Image.Image"]] , **A : Any) -> Dict:
"""simple docstring"""
return super().__call__(A , **A)
def _lowerCamelCase ( self : Any , A : Union[str, Any] , A : str=None) -> str:
"""simple docstring"""
_UpperCAmelCase = load_image(A)
if prompt is not None:
if not isinstance(A , A):
raise ValueError(
F"Received an invalid text input, got - {type(A)} - but expected a single string. "
'Note also that one single text can be provided for conditional image to text generation.')
_UpperCAmelCase = self.model.config.model_type
if model_type == "git":
_UpperCAmelCase = self.image_processor(images=A , return_tensors=self.framework)
_UpperCAmelCase = self.tokenizer(text=A , add_special_tokens=A).input_ids
_UpperCAmelCase = [self.tokenizer.cls_token_id] + input_ids
_UpperCAmelCase = torch.tensor(A).unsqueeze(0)
model_inputs.update({'input_ids': input_ids})
elif model_type == "pix2struct":
_UpperCAmelCase = self.image_processor(images=A , header_text=A , return_tensors=self.framework)
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
_UpperCAmelCase = self.image_processor(images=A , return_tensors=self.framework)
_UpperCAmelCase = self.tokenizer(A , return_tensors=self.framework)
model_inputs.update(A)
else:
raise ValueError(F"Model type {model_type} does not support conditional text generation")
else:
_UpperCAmelCase = self.image_processor(images=A , return_tensors=self.framework)
if self.model.config.model_type == "git" and prompt is None:
_UpperCAmelCase = None
return model_inputs
def _lowerCamelCase ( self : Union[str, Any] , A : int , A : Union[str, Any]=None) -> List[Any]:
"""simple docstring"""
if (
"input_ids" in model_inputs
and isinstance(model_inputs['input_ids'] , A)
and all(x is None for x in model_inputs['input_ids'])
):
_UpperCAmelCase = None
if generate_kwargs is None:
_UpperCAmelCase = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
_UpperCAmelCase = model_inputs.pop(self.model.main_input_name)
_UpperCAmelCase = self.model.generate(A , **A , **A)
return model_outputs
def _lowerCamelCase ( self : Optional[int] , A : Dict) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = []
for output_ids in model_outputs:
_UpperCAmelCase = {
'generated_text': self.tokenizer.decode(
A , skip_special_tokens=A , )
}
records.append(A)
return records
| 339 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def A ( _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any]=False ) -> str:
'''simple docstring'''
try:
_UpperCAmelCase = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_UpperCAmelCase = default
else:
# KEY is set, convert it to True or False.
try:
_UpperCAmelCase = strtobool(_UpperCAmelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"If set, {key} must be yes or no." )
return _value
UpperCAmelCase__ = parse_flag_from_env("RUN_SLOW", default=False)
def A ( _UpperCAmelCase : List[str] ) -> List[str]:
'''simple docstring'''
return unittest.skip('Test was skipped' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Dict ) -> str:
'''simple docstring'''
return unittest.skipUnless(_run_slow_tests , 'test is slow' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Any ) -> str:
'''simple docstring'''
return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Dict ) -> Dict:
'''simple docstring'''
return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Optional[Any] ) -> List[Any]:
'''simple docstring'''
return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Optional[int] ) -> List[str]:
'''simple docstring'''
return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : str ) -> str:
'''simple docstring'''
return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Optional[Any] ) -> str:
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Tuple ) -> int:
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Any ) -> Optional[int]:
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Tuple ) -> Any:
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Any ) -> Optional[int]:
'''simple docstring'''
return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : List[Any] ) -> Dict:
'''simple docstring'''
return unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Optional[int] ) -> str:
'''simple docstring'''
return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Any=None , _UpperCAmelCase : List[Any]=None ) -> Dict:
'''simple docstring'''
if test_case is None:
return partial(_UpperCAmelCase , version=_UpperCAmelCase )
return unittest.skipUnless(is_torch_version('>=' , _UpperCAmelCase ) , F"test requires torch version >= {version}" )(_UpperCAmelCase )
def A ( _UpperCAmelCase : List[str] ) -> int:
'''simple docstring'''
return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : List[str] ) -> Optional[int]:
'''simple docstring'''
return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(_UpperCAmelCase )
UpperCAmelCase__ = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def A ( _UpperCAmelCase : List[str] ) -> Any:
'''simple docstring'''
return unittest.skipUnless(
_atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(_UpperCAmelCase )
class __lowerCAmelCase ( unittest.TestCase ):
UpperCamelCase = True
@classmethod
def _lowerCamelCase ( cls : List[Any]) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = tempfile.mkdtemp()
@classmethod
def _lowerCamelCase ( cls : Union[str, Any]) -> str:
"""simple docstring"""
if os.path.exists(cls.tmpdir):
shutil.rmtree(cls.tmpdir)
def _lowerCamelCase ( self : List[str]) -> List[Any]:
"""simple docstring"""
if self.clear_on_setup:
for path in Path(self.tmpdir).glob('**/*'):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(A)
class __lowerCAmelCase ( unittest.TestCase ):
def _lowerCamelCase ( self : Dict) -> Tuple:
"""simple docstring"""
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class __lowerCAmelCase ( unittest.TestCase ):
def _lowerCamelCase ( self : Optional[int] , A : Union[mock.Mock, List[mock.Mock]]) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = mocks if isinstance(A , (tuple, list)) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop)
def A ( _UpperCAmelCase : List[Any] ) -> int:
'''simple docstring'''
_UpperCAmelCase = AcceleratorState()
_UpperCAmelCase = tensor[None].clone().to(state.device )
_UpperCAmelCase = gather(_UpperCAmelCase ).cpu()
_UpperCAmelCase = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , _UpperCAmelCase ):
return False
return True
class __lowerCAmelCase :
def __init__( self : Optional[Any] , A : Union[str, Any] , A : Optional[int] , A : str) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = returncode
_UpperCAmelCase = stdout
_UpperCAmelCase = stderr
async def A ( _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
while True:
_UpperCAmelCase = await stream.readline()
if line:
callback(_UpperCAmelCase )
else:
break
async def A ( _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : str=None , _UpperCAmelCase : str=None , _UpperCAmelCase : Dict=False , _UpperCAmelCase : Union[str, Any]=False ) -> _RunOutput:
'''simple docstring'''
if echo:
print('\nRunning: ' , ' '.join(_UpperCAmelCase ) )
_UpperCAmelCase = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=_UpperCAmelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_UpperCAmelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
_UpperCAmelCase = []
_UpperCAmelCase = []
def tee(_UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : str="" ):
_UpperCAmelCase = line.decode('utf-8' ).rstrip()
sink.append(_UpperCAmelCase )
if not quiet:
print(_UpperCAmelCase , _UpperCAmelCase , file=_UpperCAmelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda _UpperCAmelCase : tee(_UpperCAmelCase , _UpperCAmelCase , sys.stdout , label='stdout:' ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda _UpperCAmelCase : tee(_UpperCAmelCase , _UpperCAmelCase , sys.stderr , label='stderr:' ) ) ),
] , timeout=_UpperCAmelCase , )
return _RunOutput(await p.wait() , _UpperCAmelCase , _UpperCAmelCase )
def A ( _UpperCAmelCase : str , _UpperCAmelCase : Dict=None , _UpperCAmelCase : str=None , _UpperCAmelCase : str=180 , _UpperCAmelCase : List[Any]=False , _UpperCAmelCase : List[Any]=True ) -> _RunOutput:
'''simple docstring'''
_UpperCAmelCase = asyncio.get_event_loop()
_UpperCAmelCase = loop.run_until_complete(
_stream_subprocess(_UpperCAmelCase , env=_UpperCAmelCase , stdin=_UpperCAmelCase , timeout=_UpperCAmelCase , quiet=_UpperCAmelCase , echo=_UpperCAmelCase ) )
_UpperCAmelCase = ' '.join(_UpperCAmelCase )
if result.returncode > 0:
_UpperCAmelCase = '\n'.join(result.stderr )
raise RuntimeError(
F"'{cmd_str}' failed with returncode {result.returncode}\n\n"
F"The combined stderr from workers follows:\n{stderr}" )
return result
class __lowerCAmelCase ( A ):
pass
def A ( _UpperCAmelCase : List[str] , _UpperCAmelCase : str=False ) -> Tuple:
'''simple docstring'''
try:
_UpperCAmelCase = subprocess.check_output(_UpperCAmelCase , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(_UpperCAmelCase , 'decode' ):
_UpperCAmelCase = output.decode('utf-8' )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
F"Command `{' '.join(_UpperCAmelCase )}` failed with the following error:\n\n{e.output.decode()}" ) from e
| 339 | 1 |
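# The env-flag pattern defined near the top of the file above, restated as a
# standalone sketch that avoids distutils.util.strtobool (removed in
# Python 3.12): an unset variable means the default, otherwise the value must
# parse as a boolean.
import os

def parse_flag(key, default=False):
    value = os.environ.get(key)
    if value is None:
        return default
    if value.lower() in ("1", "true", "yes", "y", "on"):
        return True
    if value.lower() in ("0", "false", "no", "n", "off"):
        return False
    raise ValueError(f"If set, {key} must be yes or no.")

RUN_SLOW = parse_flag("RUN_SLOW", default=False)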
UpperCamelCase_ = {str(digit): digit**5 for digit in range(10)}
def lowerCamelCase_ ( _a : int ):
'''simple docstring'''
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(_a ) )
def lowerCamelCase_ ( ):
'''simple docstring'''
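# Upper bound: 7 * 9**5 = 413343 has only six digits, so no seven-digit
# number can equal its digit fifth-power sum; searching below 10**6 suffices.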
return sum(
number
for number in range(1000 , 100_0000 )
if number == digits_fifth_powers_sum(number ) )
if __name__ == "__main__":
print(solution())
| 357 |
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def lowerCamelCase_ ( _a : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = [False] * len(_a )
UpperCAmelCase_ : Any = [-1] * len(_a )
def dfs(_a : Optional[int] , _a : str ):
UpperCAmelCase_ : int = True
UpperCAmelCase_ : Optional[int] = c
for u in graph[v]:
if not visited[u]:
dfs(_a , 1 - c )
for i in range(len(_a ) ):
if not visited[i]:
dfs(_a , 0 )
for i in range(len(_a ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
UpperCamelCase_ = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
| 59 | 0 |
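# An iterative alternative to the recursive DFS above: BFS 2-coloring over
# the same adjacency-list input, which avoids recursion-depth limits on long
# paths. A graph is bipartite iff no edge connects two same-colored vertices.
from collections import deque

def is_bipartite_bfs(graph):
    color = {}
    for start in graph:
        if start in color:
            continue
        color[start] = 0
        queue = deque([start])
        while queue:
            u = queue.popleft()
            for v in graph[u]:
                if v not in color:
                    color[v] = 1 - color[u]
                    queue.append(v)
                elif color[v] == color[u]:
                    return False
    return True

print(is_bipartite_bfs({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}))  # True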
from jiwer import compute_measures
import datasets
__snake_case : Dict ='\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
__snake_case : Optional[Any] ='\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
__snake_case : Any ='\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> wer = datasets.load_metric("wer")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class lowerCamelCase__ ( datasets.Metric):
'''simple docstring'''
def lowerCAmelCase__ (self ) -> Optional[int]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ,id='''sequence''' ),
'''references''': datasets.Value('''string''' ,id='''sequence''' ),
} ) ,codebase_urls=['''https://github.com/jitsi/jiwer/'''] ,reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
] ,)
def lowerCAmelCase__ (self ,__lowerCamelCase=None ,__lowerCamelCase=None ,__lowerCamelCase=False ) -> Any:
"""simple docstring"""
if concatenate_texts:
return compute_measures(__lowerCamelCase ,__lowerCamelCase )["wer"]
else:
lowerCAmelCase__ : str = 0
lowerCAmelCase__ : Tuple = 0
for prediction, reference in zip(__lowerCamelCase ,__lowerCamelCase ):
lowerCAmelCase__ : Dict = compute_measures(__lowerCamelCase ,__lowerCamelCase )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
| 129 |
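# A worked instance of WER = (S + D + I) / N for the docstring's example,
# under one plausible alignment: the first pair has one substitution over
# four reference words; the second has two substitutions and one insertion
# over four reference words. Pooling the counts, as the iterative
# concatenate_texts=False branch above does, gives (1 + 3) / (4 + 4) = 0.5.
errors = (1 + 0 + 0) + (2 + 0 + 1)  # S + D + I for each pair
ref_words = 4 + 4                   # hits + S + D for each pair
print(errors / ref_words)           # 0.5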
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class lowerCamelCase__ ( lowerCamelCase__):
'''simple docstring'''
snake_case_ =(DPMSolverSDEScheduler,)
snake_case_ =10
def lowerCAmelCase__ (self ,**__lowerCamelCase ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ : List[str] = {
'''num_train_timesteps''': 11_00,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''noise_sampler_seed''': 0,
}
config.update(**__lowerCamelCase )
return config
def lowerCAmelCase__ (self ) -> int:
"""simple docstring"""
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=__lowerCamelCase )
def lowerCAmelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] ,[0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=__lowerCamelCase ,beta_end=__lowerCamelCase )
def lowerCAmelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__lowerCamelCase )
def lowerCAmelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__lowerCamelCase )
def lowerCAmelCase__ (self ) -> int:
"""simple docstring"""
lowerCAmelCase__ : List[str] = self.scheduler_classes[0]
lowerCAmelCase__ : str = self.get_scheduler_config()
lowerCAmelCase__ : Optional[Any] = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase__ : Union[str, Any] = self.dummy_model()
lowerCAmelCase__ : Tuple = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase__ : Union[str, Any] = sample.to(__lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase__ : Dict = scheduler.scale_model_input(__lowerCamelCase ,__lowerCamelCase )
lowerCAmelCase__ : Any = model(__lowerCamelCase ,__lowerCamelCase )
lowerCAmelCase__ : List[Any] = scheduler.step(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
lowerCAmelCase__ : Optional[int] = output.prev_sample
lowerCAmelCase__ : List[Any] = torch.sum(torch.abs(__lowerCamelCase ) )
lowerCAmelCase__ : Dict = torch.mean(torch.abs(__lowerCamelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_8210_4492_1875 ) < 1e-2
assert abs(result_mean.item() - 0.2178_7059_6456_5277 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3521_1181_6406 ) < 1e-2
assert abs(result_mean.item() - 0.2_2342_9068_9229_9652 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3
def lowerCAmelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ : Dict = self.scheduler_classes[0]
lowerCAmelCase__ : Any = self.get_scheduler_config(prediction_type='''v_prediction''' )
lowerCAmelCase__ : List[Any] = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase__ : Optional[int] = self.dummy_model()
lowerCAmelCase__ : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase__ : Tuple = sample.to(__lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase__ : Optional[Any] = scheduler.scale_model_input(__lowerCamelCase ,__lowerCamelCase )
lowerCAmelCase__ : Optional[Any] = model(__lowerCamelCase ,__lowerCamelCase )
lowerCAmelCase__ : Optional[int] = scheduler.step(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
lowerCAmelCase__ : Union[str, Any] = output.prev_sample
lowerCAmelCase__ : Any = torch.sum(torch.abs(__lowerCamelCase ) )
lowerCAmelCase__ : Union[str, Any] = torch.mean(torch.abs(__lowerCamelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_1492_0043_9453 ) < 1e-2
assert abs(result_mean.item() - 0.1_6226_2890_1481_6284 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_6633_6059_5703 ) < 1e-2
assert abs(result_mean.item() - 0.1_6688_3260_0116_7297 ) < 1e-3
else:
assert abs(result_sum.item() - 119.8_4875_4882_8125 ) < 1e-2
assert abs(result_mean.item() - 0.1560_5306_6253_6621 ) < 1e-3
def lowerCAmelCase__ (self ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : Any = self.scheduler_classes[0]
lowerCAmelCase__ : Tuple = self.get_scheduler_config()
lowerCAmelCase__ : str = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps ,device=__lowerCamelCase )
lowerCAmelCase__ : Optional[Any] = self.dummy_model()
lowerCAmelCase__ : List[Any] = self.dummy_sample_deter.to(__lowerCamelCase ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
lowerCAmelCase__ : List[Any] = scheduler.scale_model_input(__lowerCamelCase ,__lowerCamelCase )
lowerCAmelCase__ : Any = model(__lowerCamelCase ,__lowerCamelCase )
lowerCAmelCase__ : List[Any] = scheduler.step(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
lowerCAmelCase__ : List[Any] = output.prev_sample
lowerCAmelCase__ : List[str] = torch.sum(torch.abs(__lowerCamelCase ) )
lowerCAmelCase__ : Dict = torch.mean(torch.abs(__lowerCamelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_9573_9746_0938 ) < 1e-2
assert abs(result_mean.item() - 0.2_1805_9346_0798_2635 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3536_3769_5312 ) < 1e-2
assert abs(result_mean.item() - 0.2_2342_9083_8241_5771 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3
def lowerCAmelCase__ (self ) -> int:
"""simple docstring"""
lowerCAmelCase__ : str = self.scheduler_classes[0]
lowerCAmelCase__ : List[Any] = self.get_scheduler_config()
lowerCAmelCase__ : Union[str, Any] = scheduler_class(**__lowerCamelCase ,use_karras_sigmas=__lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps ,device=__lowerCamelCase )
lowerCAmelCase__ : str = self.dummy_model()
lowerCAmelCase__ : Union[str, Any] = self.dummy_sample_deter.to(__lowerCamelCase ) * scheduler.init_noise_sigma
lowerCAmelCase__ : Union[str, Any] = sample.to(__lowerCamelCase )
for t in scheduler.timesteps:
lowerCAmelCase__ : List[Any] = scheduler.scale_model_input(__lowerCamelCase ,__lowerCamelCase )
lowerCAmelCase__ : str = model(__lowerCamelCase ,__lowerCamelCase )
lowerCAmelCase__ : Tuple = scheduler.step(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
lowerCAmelCase__ : str = output.prev_sample
lowerCAmelCase__ : Tuple = torch.sum(torch.abs(__lowerCamelCase ) )
lowerCAmelCase__ : List[Any] = torch.mean(torch.abs(__lowerCamelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_9741_3574_2188 ) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_6535_6445_3125 ) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
else:
assert abs(result_sum.item() - 170.3_1352_2338_8672 ) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
| 129 | 1 |
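# Condensed sketch of the denoising loop the tests above exercise, assuming
# diffusers and torchsde are installed. The zero tensor stands in for a trained
# UNet's noise prediction, so the loop is illustrative rather than useful output.
import torch
from diffusers import DPMSolverSDEScheduler

scheduler = DPMSolverSDEScheduler(num_train_timesteps=1100, beta_schedule="linear", noise_sampler_seed=0)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # placeholder for model(model_input, t)
    sample = scheduler.step(noise_pred, t, sample).prev_sample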
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
a_ : Optional[Any] = TypeVar("T")
def _A (lowerCAmelCase__ :int ) -> int:
'''simple docstring'''
return (position - 1) // 2
def _A (lowerCAmelCase__ :int ) -> int:
'''simple docstring'''
return (2 * position) + 1
def _A (lowerCAmelCase__ :int ) -> int:
'''simple docstring'''
return (2 * position) + 2
class a ( Generic[T] ):
def __init__( self ) -> None:
_a = []
_a = {}
_a = 0
def __len__( self ) -> int:
return self.elements
def __repr__( self ) -> str:
return str(self.heap )
def __UpperCAmelCase ( self ) -> bool:
# Check if the priority queue is empty
return self.elements == 0
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ ) -> None:
# Add an element with given priority to the queue
self.heap.append((elem, weight) )
_a = self.elements
self.elements += 1
self._bubble_up(__magic_name__ )
def __UpperCAmelCase ( self ) -> T:
# Remove and return the element with lowest weight (highest priority)
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
_a , _a = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
_a , _a = self.heap[0]
self._bubble_down(__magic_name__ )
return elem
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ ) -> None:
# Update the weight of the given key
_a = self.position_map[elem]
_a = (elem, weight)
if position > 0:
_a = get_parent_position(__magic_name__ )
_a , _a = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(__magic_name__ )
else:
self._bubble_down(__magic_name__ )
else:
self._bubble_down(__magic_name__ )
def __UpperCAmelCase ( self , __magic_name__ ) -> None:
# Place a node at the proper position (upward movement) [to be used internally
# only]
_a = self.position_map[elem]
if curr_pos == 0:
return None
_a = get_parent_position(__magic_name__ )
_a , _a = self.heap[curr_pos]
_a , _a = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(__magic_name__ , __magic_name__ )
return self._bubble_up(__magic_name__ )
return None
def __UpperCAmelCase ( self , __magic_name__ ) -> None:
# Place a node at the proper position (downward movement) [to be used
# internally only]
_a = self.position_map[elem]
_a , _a = self.heap[curr_pos]
_a = get_child_left_position(__magic_name__ )
_a = get_child_right_position(__magic_name__ )
if child_left_position < self.elements and child_right_position < self.elements:
_a , _a = self.heap[child_left_position]
_a , _a = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(__magic_name__ , __magic_name__ )
return self._bubble_down(__magic_name__ )
if child_left_position < self.elements:
_a , _a = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(__magic_name__ , __magic_name__ )
return self._bubble_down(__magic_name__ )
else:
return None
if child_right_position < self.elements:
_a , _a = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(__magic_name__ , __magic_name__ )
return self._bubble_down(__magic_name__ )
return None
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ ) -> None:
# Swap the nodes at the given positions
_a = self.heap[nodea_pos][0]
_a = self.heap[nodea_pos][0]
_a , _a = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
_a = nodea_pos
_a = nodea_pos
class a ( Generic[T] ):
def __init__( self ) -> None:
_a = {}
_a = 0
def __repr__( self ) -> str:
return str(self.connections )
def __len__( self ) -> int:
return self.nodes
def __UpperCAmelCase ( self , __magic_name__ ) -> None:
# Add a node in the graph if it is not in the graph
if node not in self.connections:
_a = {}
self.nodes += 1
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ ) -> None:
# Add an edge between 2 nodes in the graph
self.add_node(__magic_name__ )
self.add_node(__magic_name__ )
_a = weight
_a = weight
def _A (lowerCAmelCase__ :GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]:
'''simple docstring'''
_a = {node: maxsize for node in graph.connections}
_a = {node: None for node in graph.connections}
_a = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(lowerCAmelCase__ , lowerCAmelCase__ )
if priority_queue.is_empty():
return dist, parent
# initialization
_a = priority_queue.extract_min()
_a = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
_a = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(lowerCAmelCase__ , dist[neighbour] )
_a = node
# running prim's algorithm
while not priority_queue.is_empty():
_a = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
_a = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(lowerCAmelCase__ , dist[neighbour] )
_a = node
return dist, parent
| 358 |
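# A compact alternative sketch of Prim's algorithm using the standard-library
# heapq instead of the indexed priority queue above; stale heap entries are
# simply skipped on pop. Names here are illustrative, not taken from the class above.
import heapq

def prim_mst_weight(graph: dict) -> int:
    start = next(iter(graph))
    visited = {start}
    heap = [(weight, node) for node, weight in graph[start].items()]
    heapq.heapify(heap)
    total = 0
    while heap and len(visited) < len(graph):
        weight, node = heapq.heappop(heap)
        if node in visited:
            continue  # a cheaper edge already connected this node
        visited.add(node)
        total += weight
        for nxt, w in graph[node].items():
            if nxt not in visited:
                heapq.heappush(heap, (w, nxt))
    return total

g = {"a": {"b": 1, "c": 4}, "b": {"a": 1, "c": 2}, "c": {"a": 4, "b": 2}}
assert prim_mst_weight(g) == 3  # MST keeps edges a-b (1) and b-c (2)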
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : List[str] = logging.get_logger(__name__)
a_ : str = {
"microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class a ( PretrainedConfig ):
_lowerCAmelCase = """git_vision_model"""
def __init__( self , __magic_name__=7_68 , __magic_name__=30_72 , __magic_name__=12 , __magic_name__=12 , __magic_name__=3 , __magic_name__=2_24 , __magic_name__=16 , __magic_name__="quick_gelu" , __magic_name__=1e-5 , __magic_name__=0.0 , __magic_name__=0.0_2 , **__magic_name__ , ) -> Union[str, Any]:
super().__init__(**__magic_name__ )
_a = hidden_size
_a = intermediate_size
_a = num_hidden_layers
_a = num_attention_heads
_a = num_channels
_a = patch_size
_a = image_size
_a = initializer_range
_a = attention_dropout
_a = layer_norm_eps
_a = hidden_act
@classmethod
def __UpperCAmelCase ( cls , __magic_name__ , **__magic_name__ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__magic_name__ )
_a , _a = cls.get_config_dict(__magic_name__ , **__magic_name__ )
# get the vision config dict if we are loading from GITConfig
if config_dict.get('model_type' ) == "git":
_a = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
class a ( PretrainedConfig ):
_lowerCAmelCase = """git"""
def __init__( self , __magic_name__=None , __magic_name__=3_05_22 , __magic_name__=7_68 , __magic_name__=6 , __magic_name__=12 , __magic_name__=30_72 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=10_24 , __magic_name__=0.0_2 , __magic_name__=1e-12 , __magic_name__=0 , __magic_name__="absolute" , __magic_name__=True , __magic_name__=False , __magic_name__=1_01 , __magic_name__=1_02 , __magic_name__=None , **__magic_name__ , ) -> Optional[int]:
super().__init__(bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , pad_token_id=__magic_name__ , **__magic_name__ )
if vision_config is None:
_a = {}
logger.info('vision_config is None. initializing the GitVisionConfig with default values.' )
_a = GitVisionConfig(**__magic_name__ )
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = hidden_act
_a = intermediate_size
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = initializer_range
_a = layer_norm_eps
_a = position_embedding_type
_a = use_cache
_a = tie_word_embeddings
_a = num_image_with_embedding
_a = bos_token_id
_a = eos_token_id
def __UpperCAmelCase ( self ) -> List[str]:
_a = copy.deepcopy(self.__dict__ )
_a = self.vision_config.to_dict()
_a = self.__class__.model_type
return output
| 104 | 0 |
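# Hedged usage sketch for the two config classes above, assuming they mirror
# transformers' GitVisionConfig / GitConfig; "microsoft/git-base" in the
# pretrained map above is the reference checkpoint for these defaults.
from transformers import GitConfig, GitVisionConfig

vision_config = GitVisionConfig(image_size=224, patch_size=16)
config = GitConfig(vision_config=vision_config.to_dict(), num_hidden_layers=6)
print(config.vision_config.patch_size)  # 16
print(config.to_dict()["model_type"])   # "git"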
'''simple docstring'''
def UpperCAmelCase_ ( __lowercase : list[list] ) -> list[list]:
'''simple docstring'''
_UpperCAmelCase = current_set.copy()
for row_index, row in enumerate(__lowercase ):
_UpperCAmelCase = row[0]
for column_index, column in enumerate(__lowercase ):
if magnitude == 0:
_UpperCAmelCase = column
continue
_UpperCAmelCase = column / magnitude
# Subtract to cancel term
_UpperCAmelCase = current_set[0]
_UpperCAmelCase = [first_row]
_UpperCAmelCase = current_set[1::]
for row in current_set:
_UpperCAmelCase = []
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(__lowercase )
continue
for column_index in range(len(__lowercase ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(__lowercase )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
_UpperCAmelCase = final_set[0]
_UpperCAmelCase = []
_UpperCAmelCase = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
_UpperCAmelCase = simplify(__lowercase )
for i in range(len(__lowercase ) ):
resultant[i].insert(0 , current_first_column[i] )
resultant.insert(0 , __lowercase )
_UpperCAmelCase = resultant
return final_set
def UpperCAmelCase_ ( __lowercase : list[list] ) -> list:
'''simple docstring'''
if len(__lowercase ) == 0:
raise IndexError("solve_simultaneous() requires n lists of length n+1" )
_UpperCAmelCase = len(__lowercase ) + 1
if any(len(__lowercase ) != _length for item in equations ):
raise IndexError("solve_simultaneous() requires n lists of length n+1" )
for row in equations:
if any(not isinstance(__lowercase , (int, float) ) for column in row ):
raise ValueError("solve_simultaneous() requires lists of integers" )
if len(__lowercase ) == 1:
return [equations[0][-1] / equations[0][0]]
_UpperCAmelCase = equations.copy()
if any(0 in row for row in data_set ):
_UpperCAmelCase = data_set.copy()
_UpperCAmelCase = []
for row_index, row in enumerate(__lowercase ):
if 0 not in row:
_UpperCAmelCase = data_set.pop(__lowercase )
break
if not full_row:
raise ValueError("solve_simultaneous() requires at least 1 full equation" )
data_set.insert(0 , __lowercase )
_UpperCAmelCase = data_set.copy()
_UpperCAmelCase = simplify(__lowercase )
_UpperCAmelCase = simplified[::-1]
_UpperCAmelCase = []
for row in simplified:
_UpperCAmelCase = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
_UpperCAmelCase = row.copy()[: len(__lowercase ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(__lowercase ) == 0:
solutions.append(0 )
continue
_UpperCAmelCase = temp_row[1::]
_UpperCAmelCase = temp_row[::-1]
for column_index, column in enumerate(__lowercase ):
current_solution -= column * solutions[column_index]
solutions.append(__lowercase )
_UpperCAmelCase = []
for item in solutions:
final.append(float(round(__lowercase , 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
__SCREAMING_SNAKE_CASE :Tuple = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
| 22 |
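# Sanity sketch: the five-equation system printed above can be cross-checked
# with numpy's solver (numpy is assumed available; the routine above is pure Python).
import numpy as np

eq = [
    [2, 1, 1, 1, 1, 4],
    [1, 2, 1, 1, 1, 5],
    [1, 1, 2, 1, 1, 6],
    [1, 1, 1, 2, 1, 7],
    [1, 1, 1, 1, 2, 8],
]
a = np.array([row[:-1] for row in eq], dtype=float)
b = np.array([row[-1] for row in eq], dtype=float)
print(np.linalg.solve(a, b))  # expected to match solve_simultaneous(eq)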
def UpperCamelCase_( lowerCamelCase_ ) -> int:
if not numbers:
return 0
if not isinstance(lowerCamelCase_ , (list, tuple) ) or not all(
isinstance(lowerCamelCase_ , lowerCamelCase_ ) for number in numbers ):
raise ValueError('numbers must be an iterable of integers' )
_lowercase : int = numbers[0]
for i in range(1 , len(lowerCamelCase_ ) ):
# update the maximum and minimum subarray products
_lowercase : Union[str, Any] = numbers[i]
if number < 0:
_lowercase , _lowercase : Any = min_till_now, max_till_now
_lowercase : Union[str, Any] = max(lowerCamelCase_ , max_till_now * number )
_lowercase : Union[str, Any] = min(lowerCamelCase_ , min_till_now * number )
# update the maximum product found till now
_lowercase : Optional[Any] = max(lowerCamelCase_ , lowerCamelCase_ )
return max_prod
| 21 | 0 |
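# Self-contained sketch of the Kadane-style max-product logic above, with a
# worked trace; a negative number swaps the running minimum and maximum products.
def max_product_subarray(numbers):
    max_till_now = min_till_now = max_prod = numbers[0]
    for number in numbers[1:]:
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        max_prod = max(max_prod, max_till_now)
    return max_prod

# [2, 3, -2, 4]: the running maximum goes 2 -> 6 -> -2 -> 4, so the answer is 6 ([2, 3]).
assert max_product_subarray([2, 3, -2, 4]) == 6
assert max_product_subarray([-2, 0, -1]) == 0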
"""simple docstring"""
def UpperCAmelCase ( a_, a_ ):
'''simple docstring'''
while b:
lowerCamelCase , lowerCamelCase : Tuple = b, a % b
return a
def UpperCAmelCase ( a_, a_ ):
'''simple docstring'''
return a if b == 0 else euclidean_gcd_recursive(a_, a % b )
def UpperCAmelCase ( ):
'''simple docstring'''
print(F"""euclidean_gcd(3, 5) = {euclidean_gcd(3, 5 )}""" )
print(F"""euclidean_gcd(5, 3) = {euclidean_gcd(5, 3 )}""" )
print(F"""euclidean_gcd(1, 3) = {euclidean_gcd(1, 3 )}""" )
print(F"""euclidean_gcd(3, 6) = {euclidean_gcd(3, 6 )}""" )
print(F"""euclidean_gcd(6, 3) = {euclidean_gcd(6, 3 )}""" )
print(F"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5 )}""" )
print(F"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3 )}""" )
print(F"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3 )}""" )
print(F"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6 )}""" )
print(F"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3 )}""" )
if __name__ == "__main__":
main()
| 205 |
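# Worked trace of the iterative Euclidean algorithm above for gcd(48, 18):
#   (a, b) = (48, 18) -> (18, 12) -> (12, 6) -> (6, 0), so the gcd is 6.
import math
assert math.gcd(48, 18) == 6  # the standard library agrees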
"""simple docstring"""
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_A = {
'tiny.en': 'https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt',
'tiny': 'https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt',
'base.en': 'https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt',
'base': 'https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt',
'small.en': 'https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt',
'small': 'https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt',
'medium.en': 'https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt',
'medium': 'https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt',
'large': 'https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt',
'large-v2': 'https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt',
}
def UpperCAmelCase ( a_ ):
'''simple docstring'''
lowerCamelCase : str = ['layers', 'blocks']
for k in ignore_keys:
state_dict.pop(a_, a_ )
_A = {
'blocks': 'layers',
'mlp.0': 'fc1',
'mlp.2': 'fc2',
'mlp_ln': 'final_layer_norm',
'.attn.query': '.self_attn.q_proj',
'.attn.key': '.self_attn.k_proj',
'.attn.value': '.self_attn.v_proj',
'.attn_ln': '.self_attn_layer_norm',
'.attn.out': '.self_attn.out_proj',
'.cross_attn.query': '.encoder_attn.q_proj',
'.cross_attn.key': '.encoder_attn.k_proj',
'.cross_attn.value': '.encoder_attn.v_proj',
'.cross_attn_ln': '.encoder_attn_layer_norm',
'.cross_attn.out': '.encoder_attn.out_proj',
'decoder.ln.': 'decoder.layer_norm.',
'encoder.ln.': 'encoder.layer_norm.',
'token_embedding': 'embed_tokens',
'encoder.positional_embedding': 'encoder.embed_positions.weight',
'decoder.positional_embedding': 'decoder.embed_positions.weight',
'ln_post': 'layer_norm',
}
def UpperCAmelCase ( a_ ):
'''simple docstring'''
lowerCamelCase : Tuple = list(s_dict.keys() )
for key in keys:
lowerCamelCase : List[Any] = key
for k, v in WHISPER_MAPPING.items():
if k in key:
lowerCamelCase : Optional[int] = new_key.replace(a_, a_ )
print(F"""{key} -> {new_key}""" )
lowerCamelCase : Any = s_dict.pop(a_ )
return s_dict
def UpperCAmelCase ( a_ ):
'''simple docstring'''
lowerCamelCase , lowerCamelCase : int = emb.weight.shape
lowerCamelCase : Dict = nn.Linear(a_, a_, bias=a_ )
lowerCamelCase : Union[str, Any] = emb.weight.data
return lin_layer
def UpperCAmelCase ( a_, a_ ):
'''simple docstring'''
os.makedirs(a_, exist_ok=a_ )
lowerCamelCase : Union[str, Any] = os.path.basename(a_ )
lowerCamelCase : Any = url.split('/' )[-2]
lowerCamelCase : Tuple = os.path.join(a_, a_ )
if os.path.exists(a_ ) and not os.path.isfile(a_ ):
raise RuntimeError(F"""{download_target} exists and is not a regular file""" )
if os.path.isfile(a_ ):
lowerCamelCase : Union[str, Any] = open(a_, 'rb' ).read()
        if hashlib.sha256(a_ ).hexdigest() == expected_shaaaa:
return model_bytes
else:
warnings.warn(F"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""" )
with urllib.request.urlopen(a_ ) as source, open(a_, 'wb' ) as output:
with tqdm(
total=int(source.info().get('Content-Length' ) ), ncols=80, unit='iB', unit_scale=a_, unit_divisor=1024 ) as loop:
while True:
lowerCamelCase : Union[str, Any] = source.read(8192 )
if not buffer:
break
output.write(a_ )
loop.update(len(a_ ) )
lowerCamelCase : int = open(a_, 'rb' ).read()
    if hashlib.sha256(a_ ).hexdigest() != expected_shaaaa:
raise RuntimeError(
            'Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.' )
return model_bytes
def UpperCAmelCase ( a_, a_ ):
'''simple docstring'''
if ".pt" not in checkpoint_path:
lowerCamelCase : str = _download(_MODELS[checkpoint_path] )
else:
lowerCamelCase : Any = torch.load(a_, map_location='cpu' )
lowerCamelCase : List[str] = original_checkpoint['dims']
lowerCamelCase : Any = original_checkpoint['model_state_dict']
lowerCamelCase : Tuple = state_dict['decoder.token_embedding.weight']
remove_ignore_keys_(a_ )
rename_keys(a_ )
lowerCamelCase : List[Any] = True
lowerCamelCase : str = state_dict['decoder.layers.0.fc1.weight'].shape[0]
lowerCamelCase : Optional[int] = WhisperConfig(
        vocab_size=dimensions['n_vocab'], encoder_ffn_dim=a_, decoder_ffn_dim=a_, num_mel_bins=dimensions['n_mels'], d_model=dimensions['n_audio_state'], max_target_positions=dimensions['n_text_ctx'], encoder_layers=dimensions['n_audio_layer'], encoder_attention_heads=dimensions['n_audio_head'], decoder_layers=dimensions['n_text_layer'], decoder_attention_heads=dimensions['n_text_head'], max_source_positions=dimensions['n_audio_ctx'], )
lowerCamelCase : Union[str, Any] = WhisperForConditionalGeneration(a_ )
lowerCamelCase , lowerCamelCase : Optional[int] = model.model.load_state_dict(a_, strict=a_ )
if len(a_ ) > 0 and not set(a_ ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'
F""" but all the following weights are missing {missing}""" )
if tie_embeds:
lowerCamelCase : List[Any] = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
lowerCamelCase : Tuple = proj_out_weights
model.save_pretrained(a_ )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# # Required parameters
    parser.add_argument('--checkpoint_path', type=str, help='Path to the downloaded checkpoints')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
_A = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 205 | 1 |
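# Hedged usage sketch for the conversion script above; the script file name and
# output directory are placeholders, while the two flags come from the argparse
# block above.
#
#   python convert_whisper_to_hf.py --checkpoint_path tiny.en \
#       --pytorch_dump_folder_path ./whisper-tiny-en
#
# The converted folder should then load with transformers:
from transformers import WhisperForConditionalGeneration
model = WhisperForConditionalGeneration.from_pretrained("./whisper-tiny-en")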
'''simple docstring'''
def lowercase__ ( __lowercase : int = 10**9 ) -> int:
"""simple docstring"""
__UpperCamelCase = 1
__UpperCamelCase = 2
__UpperCamelCase = 0
__UpperCamelCase = 0
__UpperCamelCase = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
__UpperCamelCase = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(f'{solution() = }')
| 53 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__a = logging.get_logger(__name__)
class A__ ( OwlViTImageProcessor ):
"""simple docstring"""
def __init__( self : Dict , *lowerCAmelCase__ : Optional[int] , **lowerCAmelCase__ : Tuple ) -> None:
"""simple docstring"""
warnings.warn(
"The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use OwlViTImageProcessor instead." , lowerCAmelCase__ , )
        super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ )
| 145 | 0 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
_UpperCamelCase : Optional[int] = "src/transformers"
_UpperCamelCase : Dict = "docs/source/en/tasks"
def snake_case (A_ :Dict , A_ :List[str] , A_ :str ):
'''simple docstring'''
with open(A_ , 'r' , encoding='utf-8' , newline='\n' ) as f:
a : Dict = f.readlines()
# Find the start prompt.
a : Union[str, Any] = 0
while not lines[start_index].startswith(A_ ):
start_index += 1
start_index += 1
a : Optional[int] = start_index
while not lines[end_index].startswith(A_ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
_UpperCamelCase : str = direct_transformers_import(TRANSFORMERS_PATH)
_UpperCamelCase : Any = {
"asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
_UpperCamelCase : str = {
"summarization.md": ("nllb",),
"translation.md": ("nllb",),
}
def snake_case (A_ :str ):
'''simple docstring'''
a : List[str] = TASK_GUIDE_TO_MODELS[task_guide]
a : Tuple = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(A_ , set() )
a : Optional[int] = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([f'''[{name}](../model_doc/{code})''' for code, name in model_names.items()] ) + "\n"
def snake_case (A_ :int , A_ :Tuple=False ):
'''simple docstring'''
a : Dict = _find_text_in_file(
filename=os.path.join(A_ , A_ ) , start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->' , end_prompt='<!--End of the generated tip-->' , )
a : Tuple = get_model_list_for_task(A_ )
if current_list != new_list:
if overwrite:
with open(os.path.join(A_ , A_ ) , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
f'''The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'''
' to fix this.' )
if __name__ == "__main__":
_UpperCamelCase : Any = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
_UpperCamelCase : str = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 361 |
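# Minimal sketch of the marker-delimited auto-generation pattern the script
# above relies on: locate the span between a start and an end prompt, then
# splice in freshly generated content. Names here are illustrative.
def replace_between_markers(text: str, start: str, end: str, new_body: str) -> str:
    lines = text.splitlines(keepends=True)
    i = next(k for k, line in enumerate(lines) if line.startswith(start)) + 1
    j = next(k for k, line in enumerate(lines) if line.startswith(end))
    return "".join(lines[:i] + [new_body] + lines[j:])

doc = "a\n<!--start-->\nold\n<!--end-->\nb\n"
assert "old" not in replace_between_markers(doc, "<!--start-->", "<!--end-->", "new\n")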
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_UpperCamelCase : Optional[int] = {
'configuration_rag': ['RagConfig'],
'retrieval_rag': ['RagRetriever'],
'tokenization_rag': ['RagTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Dict = [
'RagModel',
'RagPreTrainedModel',
'RagSequenceForGeneration',
'RagTokenForGeneration',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Optional[int] = [
'TFRagModel',
'TFRagPreTrainedModel',
'TFRagSequenceForGeneration',
'TFRagTokenForGeneration',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
_UpperCamelCase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 186 | 0 |
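# The _LazyModule pattern above defers heavy imports until attribute access.
# A minimal standard-library sketch of the same idea uses PEP 562 module-level
# __getattr__ (illustrative only; transformers' implementation does much more):
import importlib

_LAZY = {"RagModel": ".modeling_rag", "RagTokenizer": ".tokenization_rag"}

def __getattr__(name):
    if name in _LAZY:
        module = importlib.import_module(_LAZY[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")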
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
def UpperCamelCase_( lowerCamelCase_ ) -> Any:
warnings.warn(
'The preprocess method is deprecated and will be removed in a future version. Please'
' use VaeImageProcessor.preprocess instead' , lowerCamelCase_ , )
if isinstance(lowerCamelCase_ , torch.Tensor ):
return image
elif isinstance(lowerCamelCase_ , PIL.Image.Image ):
_lowercase : int = [image]
if isinstance(image[0] , PIL.Image.Image ):
_lowercase , _lowercase : Dict = image[0].size
_lowercase , _lowercase : List[Any] = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
_lowercase : Union[str, Any] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
_lowercase : Optional[Any] = np.concatenate(lowerCamelCase_ , axis=0 )
        _lowercase : Optional[int] = np.array(lowerCamelCase_ ).astype(np.float32 ) / 2_55.0
_lowercase : List[Any] = image.transpose(0 , 3 , 1 , 2 )
_lowercase : str = 2.0 * image - 1.0
_lowercase : Dict = torch.from_numpy(lowerCamelCase_ )
elif isinstance(image[0] , torch.Tensor ):
_lowercase : List[Any] = torch.cat(lowerCamelCase_ , dim=0 )
return image
def UpperCamelCase_( lowerCamelCase_ ) -> str:
if isinstance(lowerCamelCase_ , torch.Tensor ):
return mask
elif isinstance(lowerCamelCase_ , PIL.Image.Image ):
_lowercase : Dict = [mask]
if isinstance(mask[0] , PIL.Image.Image ):
_lowercase , _lowercase : Dict = mask[0].size
_lowercase , _lowercase : Union[str, Any] = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
_lowercase : List[str] = [np.array(m.convert('L' ).resize((w, h) , resample=PIL_INTERPOLATION['nearest'] ) )[None, :] for m in mask]
_lowercase : Any = np.concatenate(lowerCamelCase_ , axis=0 )
        _lowercase : Any = mask.astype(np.float32 ) / 2_55.0
_lowercase : List[str] = 0
_lowercase : Dict = 1
_lowercase : int = torch.from_numpy(lowerCamelCase_ )
elif isinstance(mask[0] , torch.Tensor ):
_lowercase : Tuple = torch.cat(lowerCamelCase_ , dim=0 )
return mask
class _lowerCamelCase( DiffusionPipeline ):
lowercase_ : UNetaDModel
lowercase_ : RePaintScheduler
def __init__( self, lowerCamelCase, lowerCamelCase) -> List[str]:
"""simple docstring"""
super().__init__()
self.register_modules(unet=lowerCamelCase, scheduler=lowerCamelCase)
@torch.no_grad()
def __call__( self, lowerCamelCase, lowerCamelCase, lowerCamelCase = 2_50, lowerCamelCase = 0.0, lowerCamelCase = 10, lowerCamelCase = 10, lowerCamelCase = None, lowerCamelCase = "pil", lowerCamelCase = True, ) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
_lowercase : Tuple = image
_lowercase : List[str] = _preprocess_image(lowerCamelCase)
_lowercase : List[Any] = original_image.to(device=self.device, dtype=self.unet.dtype)
_lowercase : int = _preprocess_mask(lowerCamelCase)
_lowercase : Dict = mask_image.to(device=self.device, dtype=self.unet.dtype)
_lowercase : Optional[int] = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(lowerCamelCase, lowerCamelCase) and len(lowerCamelCase) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(lowerCamelCase)}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''')
_lowercase : int = original_image.shape
_lowercase : Dict = randn_tensor(lowerCamelCase, generator=lowerCamelCase, device=self.device, dtype=self.unet.dtype)
# set step values
self.scheduler.set_timesteps(lowerCamelCase, lowerCamelCase, lowerCamelCase, self.device)
_lowercase : Optional[Any] = eta
_lowercase : Dict = self.scheduler.timesteps[0] + 1
_lowercase : Optional[Any] = generator[0] if isinstance(lowerCamelCase, lowerCamelCase) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
if t < t_last:
# predict the noise residual
_lowercase : int = self.unet(lowerCamelCase, lowerCamelCase).sample
# compute previous image: x_t -> x_t-1
_lowercase : Optional[Any] = self.scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
_lowercase : int = self.scheduler.undo_step(lowerCamelCase, lowerCamelCase, lowerCamelCase)
_lowercase : List[Any] = t
_lowercase : Dict = (image / 2 + 0.5).clamp(0, 1)
_lowercase : Optional[Any] = image.cpu().permute(0, 2, 3, 1).numpy()
if output_type == "pil":
_lowercase : Tuple = self.numpy_to_pil(lowerCamelCase)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCamelCase)
| 21 |
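# Hedged usage sketch for the inpainting pipeline above, following the shape of
# diffusers' RePaintPipeline examples; the checkpoint id is an assumption. Per
# the preprocessing above, the mask is binarized, and the 0-valued region is the
# part that gets inpainted.
from PIL import Image
from diffusers import RePaintPipeline, RePaintScheduler

scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
original = Image.new("RGB", (256, 256))
mask = Image.new("L", (256, 256), 255)  # all-white mask keeps everything; black regions are inpainted
result = pipe(image=original, mask_image=mask, num_inference_steps=50).images[0]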
def UpperCamelCase_( lowerCamelCase_ ) -> int:
if not numbers:
return 0
if not isinstance(lowerCamelCase_ , (list, tuple) ) or not all(
isinstance(lowerCamelCase_ , lowerCamelCase_ ) for number in numbers ):
raise ValueError('numbers must be an iterable of integers' )
_lowercase : int = numbers[0]
for i in range(1 , len(lowerCamelCase_ ) ):
# update the maximum and minimum subarray products
_lowercase : Union[str, Any] = numbers[i]
if number < 0:
_lowercase , _lowercase : Any = min_till_now, max_till_now
_lowercase : Union[str, Any] = max(lowerCamelCase_ , max_till_now * number )
_lowercase : Union[str, Any] = min(lowerCamelCase_ , min_till_now * number )
# update the maximum product found till now
_lowercase : Optional[Any] = max(lowerCamelCase_ , lowerCamelCase_ )
return max_prod
| 21 | 1 |
from __future__ import annotations
def _A ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float , ):
"""simple docstring"""
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 361 |
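# Worked example of the mass-action relation the function above encodes,
# n * p = n_i**2: with electron_conc=25 and hole_conc=100, the intrinsic
# concentration is sqrt(25 * 100) = 50.
assert (25 * 100) ** 0.5 == 50.0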
def _A ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float ):
"""simple docstring"""
return price * (1 + tax_rate)
if __name__ == "__main__":
print(F"""{price_plus_tax(100, 0.2_5) = }""")
print(F"""{price_plus_tax(1_2_5.5_0, 0.0_5) = }""")
| 148 | 0 |
"""simple docstring"""
from collections import defaultdict
def _snake_case ( lowercase__ : str , lowercase__ : str ) -> bool:
'''simple docstring'''
lowerCAmelCase_ :Dict = first_str.lower().strip()
lowerCAmelCase_ :List[str] = second_str.lower().strip()
# Remove whitespace
lowerCAmelCase_ :List[Any] = first_str.replace(""" """ , """""" )
lowerCAmelCase_ :int = second_str.replace(""" """ , """""" )
# Strings of different lengths are not anagrams
if len(lowercase__ ) != len(lowercase__ ):
return False
# Default values for count should be 0
lowerCAmelCase_ :defaultdict[str, int] = defaultdict(lowercase__ )
# For each character in input strings,
# increment count in the corresponding
for i in range(len(lowercase__ ) ):
count[first_str[i]] += 1
count[second_str[i]] -= 1
return all(_count == 0 for _count in count.values() )
if __name__ == "__main__":
from doctest import testmod
testmod()
__UpperCAmelCase = input('Enter the first string ').strip()
__UpperCAmelCase = input('Enter the second string ').strip()
__UpperCAmelCase = check_anagrams(input_a, input_b)
print(F"""{input_a} and {input_b} are {"" if status else "not "}anagrams.""")
| 84 |
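# Equivalent, shorter sketch of the anagram check above using collections.Counter
# (same lowercase-and-strip-spaces normalization, one pass per string):
from collections import Counter

def is_anagram(first: str, second: str) -> bool:
    normalize = lambda s: s.lower().replace(" ", "")
    return Counter(normalize(first)) == Counter(normalize(second))

assert is_anagram("Silent", "Listen")
assert not is_anagram("hello", "world")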
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=[0, 1, 2, 3] , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=[1, 384, 24, 24] , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , ) -> int:
UpperCamelCase :List[Any] = parent
UpperCamelCase :List[str] = batch_size
UpperCamelCase :Optional[Any] = image_size
UpperCamelCase :Optional[Any] = patch_size
UpperCamelCase :Optional[Any] = num_channels
UpperCamelCase :Union[str, Any] = is_training
UpperCamelCase :Dict = use_labels
UpperCamelCase :List[Any] = hidden_size
UpperCamelCase :Optional[int] = num_hidden_layers
UpperCamelCase :Any = backbone_out_indices
UpperCamelCase :int = num_attention_heads
UpperCamelCase :Union[str, Any] = intermediate_size
UpperCamelCase :List[str] = hidden_act
UpperCamelCase :Optional[int] = hidden_dropout_prob
UpperCamelCase :int = attention_probs_dropout_prob
UpperCamelCase :Optional[Any] = initializer_range
UpperCamelCase :List[Any] = num_labels
UpperCamelCase :Any = backbone_featmap_shape
UpperCamelCase :Optional[int] = scope
UpperCamelCase :Optional[int] = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase :Tuple = (image_size // patch_size) ** 2
UpperCamelCase :int = num_patches + 1
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase :int = None
if self.use_labels:
UpperCamelCase :str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCamelCase :Any = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase :Tuple = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [96, 192, 384, 768],
'''num_groups''': 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=SCREAMING_SNAKE_CASE_ , backbone_featmap_shape=self.backbone_featmap_shape , )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
UpperCamelCase :Optional[int] = DPTModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase :Optional[int] = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int:
UpperCamelCase :Tuple = self.num_labels
UpperCamelCase :Any = DPTForDepthEstimation(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase :Union[str, Any] = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
UpperCamelCase :int = self.num_labels
UpperCamelCase :str = DPTForSemanticSegmentation(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase :List[str] = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :List[Any] = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase :Optional[Any] = config_and_inputs
UpperCamelCase :List[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Tuple =(DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
UpperCamelCase_ : Optional[Any] =(
{
'depth-estimation': DPTForDepthEstimation,
'feature-extraction': DPTModel,
'image-segmentation': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCamelCase_ : List[Any] =False
UpperCamelCase_ : Optional[int] =False
UpperCamelCase_ : Union[str, Any] =False
def UpperCAmelCase ( self ) -> int:
UpperCamelCase :Optional[Any] = DPTModelTester(self )
UpperCamelCase :List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def UpperCAmelCase ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='''DPT does not use inputs_embeds''' )
def UpperCAmelCase ( self ) -> int:
pass
def UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase , UpperCamelCase :int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase :Union[str, Any] = model_class(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase :Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear ) )
def UpperCAmelCase ( self ) -> int:
UpperCamelCase , UpperCamelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase :Optional[Any] = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase :Tuple = [*signature.parameters.keys()]
UpperCamelCase :Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Any:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
UpperCamelCase , UpperCamelCase :Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase :int = True
if model_class in get_values(SCREAMING_SNAKE_CASE_ ):
continue
UpperCamelCase :Union[str, Any] = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.train()
UpperCamelCase :Union[str, Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Dict = model(**SCREAMING_SNAKE_CASE_ ).loss
loss.backward()
def UpperCAmelCase ( self ) -> Optional[int]:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
UpperCamelCase , UpperCamelCase :List[str] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase :Union[str, Any] = False
UpperCamelCase :Dict = True
if model_class in get_values(SCREAMING_SNAKE_CASE_ ) or not model_class.supports_gradient_checkpointing:
continue
UpperCamelCase :Tuple = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.gradient_checkpointing_enable()
model.train()
UpperCamelCase :List[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[str] = model(**SCREAMING_SNAKE_CASE_ ).loss
loss.backward()
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase , UpperCamelCase :int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase :Dict = _config_zero_init(SCREAMING_SNAKE_CASE_ )
for model_class in self.all_model_classes:
UpperCamelCase :Tuple = model_class(config=SCREAMING_SNAKE_CASE_ )
# Skip the check for the backbone
UpperCamelCase :List[str] = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
UpperCamelCase :Tuple = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def UpperCAmelCase ( self ) -> Tuple:
pass
@slow
def UpperCAmelCase ( self ) -> Any:
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
UpperCamelCase :int = DPTModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> List[Any]:
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
UpperCamelCase , UpperCamelCase :int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase :Optional[Any] = '''add'''
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase :int = DPTForDepthEstimation(SCREAMING_SNAKE_CASE_ )
def _A ( ):
UpperCamelCase :List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
@slow
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :Any = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''' )
UpperCamelCase :int = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''' ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = prepare_img()
UpperCamelCase :Union[str, Any] = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
UpperCamelCase :Union[str, Any] = model(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[int] = outputs.predicted_depth
# verify the predicted depth
UpperCamelCase :List[str] = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = torch.tensor(
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
| 259 | 0 |
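# Condensed inference sketch for the depth-estimation path the slow test above
# exercises, reusing the same checkpoint id and test image; weights download on
# first use.
import torch
from PIL import Image
from transformers import DPTForDepthEstimation, DPTImageProcessor

processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    predicted_depth = model(**inputs).predicted_depth  # shape (1, 384, 384) for this checkpoint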
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase ) -> str:
"""simple docstring"""
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError('''iterations must be defined as integers''' )
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or not number >= 1:
raise ValueError(
            '''starting number must be an integer and be more than 0''' )
if not iterations >= 1:
raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''' )
snake_case__ : Optional[Any] = ''''''
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(__lowerCAmelCase )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44 |
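# A compact, more idiomatic sketch of the same FizzBuzz rule:
def fizzbuzz_line(n: int) -> str:
    out = ("Fizz" if n % 3 == 0 else "") + ("Buzz" if n % 5 == 0 else "")
    return out or str(n)

assert [fizzbuzz_line(n) for n in (3, 5, 15, 7)] == ["Fizz", "Buzz", "FizzBuzz", "7"]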
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class a ( TokenizerTesterMixin , unittest.TestCase ):
__lowerCAmelCase : Dict = TransfoXLTokenizer
__lowerCAmelCase : Union[str, Any] = False
__lowerCAmelCase : List[str] = False
def __lowerCamelCase ( self :Union[str, Any] ):
super().setUp()
snake_case__ : Optional[int] = [
'''<unk>''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''unwanted''',
'''wa''',
'''un''',
'''running''',
''',''',
'''low''',
'''l''',
]
snake_case__ : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __lowerCamelCase ( self :int ,**__lowercase :Any ):
snake_case__ : str = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname ,**__lowercase )
def __lowerCamelCase ( self :int ,__lowercase :Optional[int] ):
snake_case__ : int = '''<unk> UNwanted , running'''
snake_case__ : List[Any] = '''<unk> unwanted, running'''
return input_text, output_text
def __lowerCamelCase ( self :Union[str, Any] ):
snake_case__ : Optional[Any] = TransfoXLTokenizer(vocab_file=self.vocab_file ,lower_case=__lowercase )
snake_case__ : Tuple = tokenizer.tokenize('''<unk> UNwanted , running''' )
self.assertListEqual(__lowercase ,['''<unk>''', '''unwanted''', ''',''', '''running'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ) ,[0, 4, 8, 7] )
def __lowerCamelCase ( self :Union[str, Any] ):
snake_case__ : List[Any] = TransfoXLTokenizer(lower_case=__lowercase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) ,['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
def __lowerCamelCase ( self :Tuple ):
snake_case__ : Optional[Any] = TransfoXLTokenizer(lower_case=__lowercase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) ,['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __lowerCamelCase ( self :Optional[int] ):
snake_case__ : Any = TransfoXLTokenizer(lower_case=__lowercase )
snake_case__ : List[str] = '''Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'''
snake_case__ : Union[str, Any] = [
'''Hello''',
'''(''',
'''bracket''',
''')''',
'''and''',
'''side''',
'''@-@''',
'''scrolled''',
'''[''',
'''and''',
''']''',
'''Henry''',
'''\'s''',
'''$''',
'''5''',
'''@,@''',
'''000''',
'''with''',
'''3''',
'''@.@''',
'''34''',
'''m''',
'''.''',
'''What''',
'''\'s''',
'''up''',
'''!''',
'''?''',
]
self.assertListEqual(tokenizer.tokenize(__lowercase ) ,__lowercase )
self.assertEqual(tokenizer.convert_tokens_to_string(__lowercase ) ,__lowercase )
def __lowerCamelCase ( self :Optional[Any] ):
snake_case__ : Any = self.get_tokenizer()
snake_case__ : Optional[Any] = len(__lowercase )
tokenizer.add_tokens(['''new1''', '''new2'''] )
tokenizer.move_added_token('''new1''' ,1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(__lowercase ) ,original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode('''new1''' ) ,[1] )
self.assertEqual(tokenizer.decode([1] ) ,'''new1''' )
| 44 | 1 |
"""simple docstring"""
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
__UpperCAmelCase = (
'https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'
)
__UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
def _snake_case ( ) -> Any:
'''simple docstring'''
lowerCAmelCase_ :str = """https://pypi.org/pypi/diffusers/json"""
lowerCAmelCase_ :List[Any] = json.loads(request.urlopen(lowercase__ ).read() )["""releases"""].keys()
return sorted(lowercase__ , key=lambda lowercase__ : version.Version(lowercase__ ) )
def _snake_case ( ) -> Optional[int]:
'''simple docstring'''
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(lowercase__ )
os.makedirs(lowercase__ , exist_ok=lowercase__ )
lowerCAmelCase_ :List[str] = Path(lowercase__ ) / """__init__.py"""
if not init_path.exists():
init_path.touch()
def _snake_case ( lowercase__ : Union[str, os.PathLike] ) -> List[str]:
'''simple docstring'''
init_hf_modules()
lowerCAmelCase_ :int = Path(lowercase__ ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(lowercase__ , exist_ok=lowercase__ )
lowerCAmelCase_ :Tuple = dynamic_module_path / """__init__.py"""
if not init_path.exists():
init_path.touch()
def _snake_case ( lowercase__ : Union[str, Any] ) -> Dict:
'''simple docstring'''
with open(lowercase__ , """r""" , encoding="""utf-8""" ) as f:
lowerCAmelCase_ :str = f.read()
# Imports of the form `import .xxx`
    lowerCAmelCase_ :int = re.findall(r"^\s*import\s+\.(\S+)\s*$" , lowercase__ , flags=re.MULTILINE )
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import" , lowercase__ , flags=re.MULTILINE )
# Unique-ify
return list(set(lowercase__ ) )
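# Quick, self-contained check (the content string is made up) of the patterns used
# above: both relative-import forms are captured and de-duplicated, while absolute
# imports such as `import torch` are ignored.
_demo = "import .utils\nfrom .pipeline_utils import DiffusionPipeline\nimport torch\n"
_rel = re.findall(r"^\s*import\s+\.(\S+)\s*$", _demo, flags=re.MULTILINE)
_rel += re.findall(r"^\s*from\s+\.(\S+)\s+import", _demo, flags=re.MULTILINE)
assert sorted(set(_rel)) == ["pipeline_utils", "utils"]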
def _snake_case ( lowercase__ : str ) -> Any:
'''simple docstring'''
lowerCAmelCase_ :str = False
lowerCAmelCase_ :List[str] = [module_file]
lowerCAmelCase_ :Any = []
# Let's recurse through all relative imports
while not no_change:
lowerCAmelCase_ :Optional[int] = []
for f in files_to_check:
new_imports.extend(get_relative_imports(lowercase__ ) )
lowerCAmelCase_ :int = Path(lowercase__ ).parent
lowerCAmelCase_ :Tuple = [str(module_path / m ) for m in new_imports]
lowerCAmelCase_ :Tuple = [f for f in new_import_files if f not in all_relative_imports]
lowerCAmelCase_ :str = [f"""{f}.py""" for f in new_import_files]
lowerCAmelCase_ :Union[str, Any] = len(lowercase__ ) == 0
all_relative_imports.extend(lowercase__ )
return all_relative_imports
def _snake_case ( lowercase__ : int ) -> Dict:
'''simple docstring'''
with open(lowercase__ , """r""" , encoding="""utf-8""" ) as f:
lowerCAmelCase_ :int = f.read()
# Imports of the form `import xxx`
    lowerCAmelCase_ :Tuple = re.findall(r"^\s*import\s+(\S+)\s*$" , lowercase__ , flags=re.MULTILINE )
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import" , lowercase__ , flags=re.MULTILINE )
# Only keep the top-level module
lowerCAmelCase_ :Union[str, Any] = [imp.split(""".""" )[0] for imp in imports if not imp.startswith(""".""" )]
# Unique-ify and test we got them all
lowerCAmelCase_ :Union[str, Any] = list(set(lowercase__ ) )
lowerCAmelCase_ :Optional[int] = []
for imp in imports:
try:
importlib.import_module(lowercase__ )
except ImportError:
missing_packages.append(lowercase__ )
if len(lowercase__ ) > 0:
raise ImportError(
"""This modeling file requires the following packages that were not found in your environment: """
f"""{", ".join(lowercase__ )}. Run `pip install {" ".join(lowercase__ )}`""" )
return get_relative_imports(lowercase__ )
def _snake_case ( lowercase__ : List[str] , lowercase__ : Tuple ) -> str:
'''simple docstring'''
lowerCAmelCase_ :Optional[int] = module_path.replace(os.path.sep , """.""" )
lowerCAmelCase_ :List[str] = importlib.import_module(lowercase__ )
if class_name is None:
return find_pipeline_class(lowercase__ )
return getattr(lowercase__ , lowercase__ )
def _snake_case ( lowercase__ : Union[str, Any] ) -> Tuple:
'''simple docstring'''
from ..pipelines import DiffusionPipeline
lowerCAmelCase_ :Optional[int] = dict(inspect.getmembers(lowercase__ , inspect.isclass ) )
lowerCAmelCase_ :str = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , lowercase__ )
and cls.__module__.split(""".""" )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
f"""Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"""
f""" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"""
f""" {loaded_module}.""" )
lowerCAmelCase_ :Dict = cls
return pipeline_class
def _snake_case ( lowercase__ : Union[str, os.PathLike] , lowercase__ : str , lowercase__ : Optional[Union[str, os.PathLike]] = None , lowercase__ : bool = False , lowercase__ : bool = False , lowercase__ : Optional[Dict[str, str]] = None , lowercase__ : Optional[Union[bool, str]] = None , lowercase__ : Optional[str] = None , lowercase__ : bool = False , ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ :Dict = str(lowercase__ )
lowerCAmelCase_ :List[str] = os.path.join(lowercase__ , lowercase__ )
if os.path.isfile(lowercase__ ):
lowerCAmelCase_ :List[Any] = module_file_or_url
lowerCAmelCase_ :Union[str, Any] = """local"""
elif pretrained_model_name_or_path.count("""/""" ) == 0:
lowerCAmelCase_ :Tuple = get_diffusers_versions()
# cut ".dev0"
lowerCAmelCase_ :Any = """v""" + """.""".join(__version__.split(""".""" )[:3] )
# retrieve github version that matches
if revision is None:
lowerCAmelCase_ :Dict = latest_version if latest_version[1:] in available_versions else """main"""
logger.info(f"""Defaulting to latest_version: {revision}.""" )
elif revision in available_versions:
lowerCAmelCase_ :List[Any] = f"""v{revision}"""
elif revision == "main":
lowerCAmelCase_ :Tuple = revision
else:
raise ValueError(
f"""`custom_revision`: {revision} does not exist. Please make sure to choose one of"""
f""" {", ".join(available_versions + ["main"] )}.""" )
# community pipeline on GitHub
lowerCAmelCase_ :Any = COMMUNITY_PIPELINES_URL.format(revision=lowercase__ , pipeline=lowercase__ )
try:
lowerCAmelCase_ :Any = cached_download(
lowercase__ , cache_dir=lowercase__ , force_download=lowercase__ , proxies=lowercase__ , resume_download=lowercase__ , local_files_only=lowercase__ , use_auth_token=lowercase__ , )
lowerCAmelCase_ :Any = """git"""
lowerCAmelCase_ :str = pretrained_model_name_or_path + """.py"""
except EnvironmentError:
logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" )
raise
else:
try:
# Load from URL or cache if already cached
lowerCAmelCase_ :Optional[Any] = hf_hub_download(
lowercase__ , lowercase__ , cache_dir=lowercase__ , force_download=lowercase__ , proxies=lowercase__ , resume_download=lowercase__ , local_files_only=lowercase__ , use_auth_token=lowercase__ , )
lowerCAmelCase_ :Any = os.path.join("""local""" , """--""".join(pretrained_model_name_or_path.split("""/""" ) ) )
except EnvironmentError:
logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" )
raise
# Check we have all the requirements in our environment
lowerCAmelCase_ :str = check_imports(lowercase__ )
# Now we move the module inside our cached dynamic modules.
lowerCAmelCase_ :Tuple = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(lowercase__ )
lowerCAmelCase_ :Optional[Any] = Path(lowercase__ ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(lowercase__ , submodule_path / module_file )
for module_needed in modules_needed:
lowerCAmelCase_ :Tuple = f"""{module_needed}.py"""
shutil.copy(os.path.join(lowercase__ , lowercase__ ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(lowercase__ , lowercase__ ):
lowerCAmelCase_ :Union[str, Any] = use_auth_token
elif use_auth_token is True:
lowerCAmelCase_ :Any = HfFolder.get_token()
else:
lowerCAmelCase_ :List[str] = None
lowerCAmelCase_ :Dict = model_info(lowercase__ , revision=lowercase__ , token=lowercase__ ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
lowerCAmelCase_ :Optional[int] = submodule_path / commit_hash
lowerCAmelCase_ :Any = full_submodule + os.path.sep + commit_hash
create_dynamic_module(lowercase__ )
if not (submodule_path / module_file).exists():
shutil.copy(lowercase__ , submodule_path / module_file )
# Make sure we also have every file with relative
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
lowercase__ , f"""{module_needed}.py""" , cache_dir=lowercase__ , force_download=lowercase__ , resume_download=lowercase__ , proxies=lowercase__ , use_auth_token=lowercase__ , revision=lowercase__ , local_files_only=lowercase__ , )
return os.path.join(lowercase__ , lowercase__ )
def _snake_case ( lowercase__ : Union[str, os.PathLike] , lowercase__ : str , lowercase__ : Optional[str] = None , lowercase__ : Optional[Union[str, os.PathLike]] = None , lowercase__ : bool = False , lowercase__ : bool = False , lowercase__ : Optional[Dict[str, str]] = None , lowercase__ : Optional[Union[bool, str]] = None , lowercase__ : Optional[str] = None , lowercase__ : bool = False , **lowercase__ : str , ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ :Optional[int] = get_cached_module_file(
lowercase__ , lowercase__ , cache_dir=lowercase__ , force_download=lowercase__ , resume_download=lowercase__ , proxies=lowercase__ , use_auth_token=lowercase__ , revision=lowercase__ , local_files_only=lowercase__ , )
return get_class_in_module(lowercase__ , final_module.replace(""".py""" , """""" ) )
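# Typical use of the loader above (illustrative; in the upstream diffusers source
# this last helper is named `get_class_from_dynamic_module`, and the repo id and
# file name below are placeholders):
#
#   pipeline_cls = get_class_from_dynamic_module(
#       "some-user/my-community-pipeline",  # Hub repo id, local path, or pipeline name
#       "my_pipeline.py",                   # module file defining the pipeline class
#   )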
| 84 |
def excel_title_to_column(column_title: str) -> int:
    """simple docstring"""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
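# Worked examples for the base-26 conversion above ("A" has ord 65, hence the -64):
assert excel_title_to_column("A") == 1
assert excel_title_to_column("AB") == 1 * 26 + 2    # 28
assert excel_title_to_column("ZZ") == 26 * 26 + 26  # 702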
| 305 | 0 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def _A ( lowercase ):
"""simple docstring"""
# vision encoder
if "img_encoder.pos_embed" in name:
a =name.replace('''img_encoder.pos_embed''' , '''vision_model.embeddings.position_embeddings''' )
if "img_encoder.patch_embed.proj" in name:
a =name.replace('''img_encoder.patch_embed.proj''' , '''vision_model.embeddings.patch_embeddings.projection''' )
if "img_encoder.patch_embed.norm" in name:
a =name.replace('''img_encoder.patch_embed.norm''' , '''vision_model.embeddings.layernorm''' )
if "img_encoder.layers" in name:
a =name.replace('''img_encoder.layers''' , '''vision_model.encoder.stages''' )
if "blocks" in name and "res" not in name:
a =name.replace('''blocks''' , '''layers''' )
if "attn" in name and "pre_assign" not in name:
a =name.replace('''attn''' , '''self_attn''' )
if "proj" in name and "self_attn" in name and "text" not in name:
a =name.replace('''proj''' , '''out_proj''' )
if "pre_assign_attn.attn.proj" in name:
a =name.replace('''pre_assign_attn.attn.proj''' , '''pre_assign_attn.attn.out_proj''' )
if "norm1" in name:
a =name.replace('''norm1''' , '''layer_norm1''' )
if "norm2" in name and "pre_assign" not in name:
a =name.replace('''norm2''' , '''layer_norm2''' )
if "img_encoder.norm" in name:
a =name.replace('''img_encoder.norm''' , '''vision_model.layernorm''' )
# text encoder
if "text_encoder.token_embedding" in name:
a =name.replace('''text_encoder.token_embedding''' , '''text_model.embeddings.token_embedding''' )
if "text_encoder.positional_embedding" in name:
a =name.replace('''text_encoder.positional_embedding''' , '''text_model.embeddings.position_embedding.weight''' )
if "text_encoder.transformer.resblocks." in name:
a =name.replace('''text_encoder.transformer.resblocks.''' , '''text_model.encoder.layers.''' )
if "ln_1" in name:
a =name.replace('''ln_1''' , '''layer_norm1''' )
if "ln_2" in name:
a =name.replace('''ln_2''' , '''layer_norm2''' )
if "c_fc" in name:
a =name.replace('''c_fc''' , '''fc1''' )
if "c_proj" in name:
a =name.replace('''c_proj''' , '''fc2''' )
if "text_encoder" in name:
a =name.replace('''text_encoder''' , '''text_model''' )
if "ln_final" in name:
a =name.replace('''ln_final''' , '''final_layer_norm''' )
# projection layers
if "img_projector.linear_hidden." in name:
a =name.replace('''img_projector.linear_hidden.''' , '''visual_projection.''' )
if "img_projector.linear_out." in name:
a =name.replace('''img_projector.linear_out.''' , '''visual_projection.3.''' )
if "text_projector.linear_hidden" in name:
a =name.replace('''text_projector.linear_hidden''' , '''text_projection''' )
if "text_projector.linear_out" in name:
a =name.replace('''text_projector.linear_out''' , '''text_projection.3''' )
return name
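# Example of the cumulative renaming above (illustrative key; every matching rule
# fires in sequence):
#
#   "img_encoder.layers.0.blocks.1.attn.qkv.weight"
#     -> "vision_model.encoder.stages.0.layers.1.self_attn.qkv.weight"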
def _A ( lowercase , lowercase ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
a =orig_state_dict.pop(lowercase )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
a =key.split('''.''' )
a , a =int(key_split[2] ), int(key_split[4] )
a =config.vision_config.hidden_size
if "weight" in key:
a =val[:dim, :]
a =val[dim : dim * 2, :]
a =val[-dim:, :]
else:
a =val[:dim]
a =val[dim : dim * 2]
a =val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
a =key.split('''.''' )
a =int(key_split[3] )
a =config.text_config.hidden_size
if "weight" in key:
a =val[:dim, :]
a =val[
dim : dim * 2, :
]
a =val[-dim:, :]
else:
a =val[:dim]
a =val[dim : dim * 2]
a =val[-dim:]
else:
a =rename_key(lowercase )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
a =val.squeeze_()
else:
a =val
return orig_state_dict
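# Minimal sketch of the fused-qkv split performed above (shapes illustrative): the
# checkpoint stores a single (3*dim, dim) projection per attention block, while the
# HF model expects separate q/k/v matrices of shape (dim, dim) each.
_dim = 4
_qkv = torch.randn(3 * _dim, _dim)
_q, _k, _v = _qkv[:_dim, :], _qkv[_dim : 2 * _dim, :], _qkv[-_dim:, :]
assert _q.shape == _k.shape == _v.shape == (_dim, _dim)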
def _A ( ):
"""simple docstring"""
a ='''http://images.cocodataset.org/val2017/000000039769.jpg'''
a =Image.open(requests.get(lowercase , stream=lowercase ).raw )
return im
@torch.no_grad()
def _A ( lowercase , lowercase , lowercase="groupvit-gcc-yfcc" , lowercase=False ):
"""simple docstring"""
a =GroupViTConfig()
a =GroupViTModel(lowercase ).eval()
a =torch.load(lowercase , map_location='''cpu''' )['''model''']
a =convert_state_dict(lowercase , lowercase )
a , a =model.load_state_dict(lowercase , strict=lowercase )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(lowercase ) == 0)
# verify result
a =CLIPProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
a =prepare_img()
a =processor(text=['''a photo of a cat''', '''a photo of a dog'''] , images=lowercase , padding=lowercase , return_tensors='''pt''' )
with torch.no_grad():
a =model(**lowercase )
if model_name == "groupvit-gcc-yfcc":
a =torch.tensor([[13.3523, 6.3629]] )
elif model_name == "groupvit-gcc-redcaps":
a =torch.tensor([[16.1873, 8.6230]] )
else:
raise ValueError(f'''Model name {model_name} not supported.''' )
assert torch.allclose(outputs.logits_per_image , lowercase , atol=1E-3 )
processor.save_pretrained(lowercase )
model.save_pretrained(lowercase )
print('''Successfully saved processor and model to''' , lowercase )
if push_to_hub:
print('''Pushing to the hub...''' )
processor.push_to_hub(lowercase , organization='''nielsr''' )
model.push_to_hub(lowercase , organization='''nielsr''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""")
parser.add_argument(
"""--model_name""",
default="""groupvit-gccy-fcc""",
type=str,
help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""",
)
    args = parser.parse_args()
    convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 215 |
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def _A ( lowercase ):
"""simple docstring"""
a ={}
a =tokenizer(example['''content'''] , truncation=lowercase )['''input_ids''']
a =len(example['''content'''] ) / len(output['''input_ids'''] )
return output
lowerCamelCase_ : Optional[int] = HfArgumentParser(PretokenizationArguments)
lowerCamelCase_ : Optional[Any] = parser.parse_args()
if args.num_workers is None:
lowerCamelCase_ : Tuple = multiprocessing.cpu_count()
lowerCamelCase_ : Any = AutoTokenizer.from_pretrained(args.tokenizer_dir)
lowerCamelCase_ : Any = time.time()
lowerCamelCase_ : int = load_dataset(args.dataset_name, split="""train""")
print(F'Dataset loaded in {time.time()-t_start:.2f}s')
lowerCamelCase_ : List[str] = time.time()
lowerCamelCase_ : str = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"""repo_name""",
"""path""",
"""copies""",
"""size""",
"""content""",
"""license""",
"""hash""",
"""line_mean""",
"""line_max""",
"""alpha_frac""",
"""autogenerated""",
],
)
print(F'Dataset tokenized in {time.time()-t_start:.2f}s')
lowerCamelCase_ : Union[str, Any] = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'Data pushed to the hub in {time.time()-t_start:.2f}s') | 215 | 1 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class _UpperCamelCase :
'''simple docstring'''
def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=False , __a=True , __a=99 , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=5_12 , __a=16 , __a=2 , __a=0.0_2 , __a=3 , __a=4 , __a=None , ):
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = seq_length
__lowerCAmelCase = is_training
__lowerCAmelCase = use_input_mask
__lowerCAmelCase = use_token_type_ids
__lowerCAmelCase = use_labels
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = type_vocab_size
__lowerCAmelCase = type_sequence_label_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = num_labels
__lowerCAmelCase = num_choices
__lowerCAmelCase = scope
def snake_case ( self ):
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase = None
if self.use_input_mask:
__lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase = None
if self.use_token_type_ids:
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case ( self ):
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , use_stable_embedding=__a , )
def snake_case ( self , __a , __a , __a , __a , __a , __a , __a ):
__lowerCAmelCase = OpenLlamaModel(config=__a )
model.to(__a )
model.eval()
__lowerCAmelCase = model(__a , attention_mask=__a )
__lowerCAmelCase = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self , __a , __a , __a , __a , __a , __a , __a , __a , __a , ):
__lowerCAmelCase = True
__lowerCAmelCase = OpenLlamaModel(__a )
model.to(__a )
model.eval()
__lowerCAmelCase = model(
__a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , )
__lowerCAmelCase = model(
__a , attention_mask=__a , encoder_hidden_states=__a , )
__lowerCAmelCase = model(__a , attention_mask=__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self , __a , __a , __a , __a , __a , __a , __a , __a , __a , ):
__lowerCAmelCase = OpenLlamaForCausalLM(config=__a )
model.to(__a )
model.eval()
__lowerCAmelCase = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case ( self , __a , __a , __a , __a , __a , __a , __a , __a , __a , ):
__lowerCAmelCase = True
__lowerCAmelCase = True
__lowerCAmelCase = OpenLlamaForCausalLM(config=__a )
model.to(__a )
model.eval()
# first forward pass
__lowerCAmelCase = model(
__a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , use_cache=__a , )
__lowerCAmelCase = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
__lowerCAmelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
__lowerCAmelCase = torch.cat([input_mask, next_mask] , dim=-1 )
__lowerCAmelCase = model(
__a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , output_hidden_states=__a , )["hidden_states"][0]
__lowerCAmelCase = model(
__a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , past_key_values=__a , output_hidden_states=__a , )["hidden_states"][0]
# select random slice
__lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
__lowerCAmelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__a , __a , atol=1e-3 ) )
def snake_case ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _UpperCamelCase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] =(
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
__UpperCAmelCase : List[str] =(OpenLlamaForCausalLM,) if is_torch_available() else ()
__UpperCAmelCase : Any =(
{
"""feature-extraction""": OpenLlamaModel,
"""text-classification""": OpenLlamaForSequenceClassification,
"""text-generation""": OpenLlamaForCausalLM,
"""zero-shot""": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCAmelCase : int =False
__UpperCAmelCase : List[Any] =False
def snake_case ( self ):
__lowerCAmelCase = OpenLlamaModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=__a , hidden_size=37 )
def snake_case ( self ):
self.config_tester.run_common_tests()
def snake_case ( self ):
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def snake_case ( self ):
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowerCAmelCase = type
self.model_tester.create_and_check_model(*__a )
def snake_case ( self ):
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = 3
__lowerCAmelCase = input_dict["input_ids"]
__lowerCAmelCase = input_ids.ne(1 ).to(__a )
__lowerCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__lowerCAmelCase = OpenLlamaForSequenceClassification(__a )
model.to(__a )
model.eval()
__lowerCAmelCase = model(__a , attention_mask=__a , labels=__a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case ( self ):
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = 3
__lowerCAmelCase = "single_label_classification"
__lowerCAmelCase = input_dict["input_ids"]
__lowerCAmelCase = input_ids.ne(1 ).to(__a )
__lowerCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__lowerCAmelCase = OpenLlamaForSequenceClassification(__a )
model.to(__a )
model.eval()
__lowerCAmelCase = model(__a , attention_mask=__a , labels=__a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case ( self ):
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = 3
__lowerCAmelCase = "multi_label_classification"
__lowerCAmelCase = input_dict["input_ids"]
__lowerCAmelCase = input_ids.ne(1 ).to(__a )
__lowerCAmelCase = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__lowerCAmelCase = OpenLlamaForSequenceClassification(__a )
model.to(__a )
model.eval()
__lowerCAmelCase = model(__a , attention_mask=__a , labels=__a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("Open-Llama buffers include complex numbers, which breaks this test" )
def snake_case ( self ):
pass
@parameterized.expand([("linear",), ("dynamic",)] )
def snake_case ( self , __a ):
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = ids_tensor([1, 10] , config.vocab_size )
__lowerCAmelCase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__lowerCAmelCase = OpenLlamaModel(__a )
original_model.to(__a )
original_model.eval()
__lowerCAmelCase = original_model(__a ).last_hidden_state
__lowerCAmelCase = original_model(__a ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__lowerCAmelCase = {"type": scaling_type, "factor": 1_0.0}
__lowerCAmelCase = OpenLlamaModel(__a )
scaled_model.to(__a )
scaled_model.eval()
__lowerCAmelCase = scaled_model(__a ).last_hidden_state
__lowerCAmelCase = scaled_model(__a ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__a , __a , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(__a , __a , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__a , __a , atol=1e-5 ) )
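# Sketch of what the "linear" RoPE scaling exercised above does (an illustration,
# not the model's actual code; assumes torch is available): positions are divided
# by the scaling factor before the rotary angles are computed, which stretches the
# usable context window.
def _rotary_angles(positions, dim=8, base=10000.0, factor=1.0):
    inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
    return torch.outer(positions.float() / factor, inv_freq)


_pos = torch.arange(4)
assert torch.allclose(_rotary_angles(_pos, factor=2.0), _rotary_angles(_pos / 2.0))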
| 57 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
A : Tuple = logging.get_logger(__name__)
A : Tuple = [
("bert.bert", "visual_bert"),
("bert.cls", "cls"),
("bert.classifier", "cls"),
("token_type_embeddings_visual", "visual_token_type_embeddings"),
("position_embeddings_visual", "visual_position_embeddings"),
("projection", "visual_projection"),
]
A : Optional[Any] = [
"nlvr2_coco_pre_trained.th",
"nlvr2_fine_tuned.th",
"nlvr2_pre_trained.th",
"vcr_coco_pre_train.th",
"vcr_fine_tune.th",
"vcr_pre_train.th",
"vqa_coco_pre_trained.th",
"vqa_fine_tuned.th",
"vqa_pre_trained.th",
]
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = torch.load(_UpperCamelCase , map_location="cpu" )
return sd
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=rename_keys_prefix ):
'''simple docstring'''
__lowerCAmelCase = OrderedDict()
__lowerCAmelCase = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
__lowerCAmelCase = key
for name_pair in rename_keys_prefix:
__lowerCAmelCase = new_key.replace(name_pair[0] , name_pair[1] )
__lowerCAmelCase = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`, but was added separately
__lowerCAmelCase = new_d["cls.predictions.bias"]
return new_d
@torch.no_grad()
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
assert (
checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS
), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."
# Get Config
if "pre" in checkpoint_path:
__lowerCAmelCase = "pretraining"
if "vcr" in checkpoint_path:
__lowerCAmelCase = {"visual_embedding_dim": 512}
elif "vqa_advanced" in checkpoint_path:
__lowerCAmelCase = {"visual_embedding_dim": 2048}
elif "vqa" in checkpoint_path:
__lowerCAmelCase = {"visual_embedding_dim": 2048}
elif "nlvr" in checkpoint_path:
__lowerCAmelCase = {"visual_embedding_dim": 1024}
else:
raise NotImplementedError(f"No implementation found for `{checkpoint_path}`." )
else:
if "vcr" in checkpoint_path:
__lowerCAmelCase = {"visual_embedding_dim": 512}
__lowerCAmelCase = "multichoice"
elif "vqa_advanced" in checkpoint_path:
__lowerCAmelCase = {"visual_embedding_dim": 2048}
__lowerCAmelCase = "vqa_advanced"
elif "vqa" in checkpoint_path:
__lowerCAmelCase = {"visual_embedding_dim": 2048, "num_labels": 3129}
__lowerCAmelCase = "vqa"
elif "nlvr" in checkpoint_path:
__lowerCAmelCase = {
"visual_embedding_dim": 1024,
"num_labels": 2,
}
__lowerCAmelCase = "nlvr"
__lowerCAmelCase = VisualBertConfig(**_UpperCamelCase )
# Load State Dict
__lowerCAmelCase = load_state_dict(_UpperCamelCase )
__lowerCAmelCase = get_new_dict(_UpperCamelCase , _UpperCamelCase )
if model_type == "pretraining":
__lowerCAmelCase = VisualBertForPreTraining(_UpperCamelCase )
elif model_type == "vqa":
__lowerCAmelCase = VisualBertForQuestionAnswering(_UpperCamelCase )
elif model_type == "nlvr":
__lowerCAmelCase = VisualBertForVisualReasoning(_UpperCamelCase )
elif model_type == "multichoice":
__lowerCAmelCase = VisualBertForMultipleChoice(_UpperCamelCase )
model.load_state_dict(_UpperCamelCase )
# Save Checkpoints
Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase )
model.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 57 | 1 |
def solution(n: int = 1_000) -> int:
    fa, fb = 1, 1
    index = 2
    while True:
        f = fa + fb
        fa, fb = fb, f
        index += 1
        if len(str(f)) == n:
            break
    return index
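# Quick sanity check for the function above: the first Fibonacci number with
# 3 digits is F(12) = 144.
assert solution(3) == 12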
if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 350 |
import torch
from torch import nn
class A__ ( nn.Module ):
def __init__( self : Optional[int] , a : Union[str, Any] , a : str , a : str , a : List[Any] , a : List[Any]=1 , a : Tuple=False ):
'''simple docstring'''
super().__init__()
lowerCAmelCase__ : Dict = n_token
lowerCAmelCase__ : Any = d_embed
lowerCAmelCase__ : str = d_proj
lowerCAmelCase__ : int = cutoffs + [n_token]
lowerCAmelCase__ : Union[str, Any] = [0] + self.cutoffs
lowerCAmelCase__ : str = div_val
lowerCAmelCase__ : Tuple = self.cutoffs[0]
lowerCAmelCase__ : Dict = len(self.cutoffs ) - 1
lowerCAmelCase__ : Any = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
lowerCAmelCase__ : int = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
lowerCAmelCase__ : Optional[Any] = nn.Parameter(torch.zeros(self.n_clusters ) )
lowerCAmelCase__ : Optional[int] = nn.ModuleList()
lowerCAmelCase__ : Tuple = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(a , a ) ) )
else:
self.out_projs.append(a )
self.out_layers.append(nn.Linear(a , a ) )
else:
for i in range(len(self.cutoffs ) ):
lowerCAmelCase__ , lowerCAmelCase__ : Any = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase__ : Optional[Any] = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(a , a ) ) )
self.out_layers.append(nn.Linear(a , r_idx - l_idx ) )
lowerCAmelCase__ : Tuple = keep_order
def _lowerCamelCase ( self : Optional[int] , a : List[str] , a : int , a : List[str] , a : str ):
'''simple docstring'''
if proj is None:
lowerCAmelCase__ : Tuple = nn.functional.linear(a , a , bias=a )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
lowerCAmelCase__ : int = nn.functional.linear(a , proj.t().contiguous() )
lowerCAmelCase__ : Tuple = nn.functional.linear(a , a , bias=a )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def _lowerCamelCase ( self : List[str] , a : List[Any] , a : Optional[int]=None , a : Tuple=False ):
'''simple docstring'''
if labels is not None:
# Shift so that tokens < n predict n
lowerCAmelCase__ : str = hidden[..., :-1, :].contiguous()
lowerCAmelCase__ : Optional[Any] = labels[..., 1:].contiguous()
lowerCAmelCase__ : List[Any] = hidden.view(-1 , hidden.size(-1 ) )
lowerCAmelCase__ : Tuple = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('Input and labels should have the same size in the batch dimension.' )
else:
lowerCAmelCase__ : Optional[Any] = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
lowerCAmelCase__ : Optional[Any] = self._compute_logit(a , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
lowerCAmelCase__ : str = labels != -100
lowerCAmelCase__ : int = torch.zeros_like(a , dtype=hidden.dtype , device=hidden.device )
lowerCAmelCase__ : List[str] = (
-nn.functional.log_softmax(a , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
lowerCAmelCase__ : Optional[Any] = nn.functional.log_softmax(a , dim=-1 )
else:
# construct weights and biases
lowerCAmelCase__ , lowerCAmelCase__ : int = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase__ : Any = self.out_layers[0].weight[l_idx:r_idx]
lowerCAmelCase__ : Any = self.out_layers[0].bias[l_idx:r_idx]
else:
lowerCAmelCase__ : Optional[Any] = self.out_layers[i].weight
lowerCAmelCase__ : Optional[int] = self.out_layers[i].bias
if i == 0:
lowerCAmelCase__ : Dict = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowerCAmelCase__ : Union[str, Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(a )
biases.append(a )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = weights[0], biases[0], self.out_projs[0]
lowerCAmelCase__ : List[Any] = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : Union[str, Any] = nn.functional.log_softmax(a , dim=1 )
if labels is None:
lowerCAmelCase__ : Tuple = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
lowerCAmelCase__ : Dict = torch.zeros_like(a , dtype=hidden.dtype , device=hidden.device )
lowerCAmelCase__ : Tuple = 0
lowerCAmelCase__ : Union[str, Any] = [0] + self.cutoffs
for i in range(len(a ) - 1 ):
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
lowerCAmelCase__ : Tuple = (labels >= l_idx) & (labels < r_idx)
lowerCAmelCase__ : int = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
lowerCAmelCase__ : Tuple = labels.index_select(0 , a ) - l_idx
lowerCAmelCase__ : Any = head_logprob.index_select(0 , a )
lowerCAmelCase__ : Optional[int] = hidden.index_select(0 , a )
else:
lowerCAmelCase__ : Any = hidden
if i == 0:
if labels is not None:
lowerCAmelCase__ : Union[str, Any] = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
lowerCAmelCase__ : List[str] = head_logprob[:, : self.cutoffs[0]]
else:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = weights[i], biases[i], self.out_projs[i]
lowerCAmelCase__ : Union[str, Any] = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : Optional[int] = nn.functional.log_softmax(a , dim=1 )
lowerCAmelCase__ : List[Any] = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
lowerCAmelCase__ : List[str] = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
lowerCAmelCase__ : Tuple = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
lowerCAmelCase__ : Union[str, Any] = logprob_i
if labels is not None:
if (hasattr(self , 'keep_order' ) and self.keep_order) or keep_order:
out.index_copy_(0 , a , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def _lowerCamelCase ( self : List[Any] , a : Any ):
'''simple docstring'''
if self.n_clusters == 0:
lowerCAmelCase__ : Union[str, Any] = self._compute_logit(a , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(a , dim=-1 )
else:
# construct weights and biases
lowerCAmelCase__ , lowerCAmelCase__ : str = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase__ : str = self.out_layers[0].weight[l_idx:r_idx]
lowerCAmelCase__ : Dict = self.out_layers[0].bias[l_idx:r_idx]
else:
lowerCAmelCase__ : int = self.out_layers[i].weight
lowerCAmelCase__ : int = self.out_layers[i].bias
if i == 0:
lowerCAmelCase__ : Optional[int] = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowerCAmelCase__ : Union[str, Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(a )
biases.append(a )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : str = weights[0], biases[0], self.out_projs[0]
lowerCAmelCase__ : Dict = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : List[Any] = hidden.new_empty((head_logit.size(0 ), self.n_token) )
lowerCAmelCase__ : Optional[Any] = nn.functional.log_softmax(a , dim=1 )
lowerCAmelCase__ : List[Any] = [0] + self.cutoffs
for i in range(len(a ) - 1 ):
lowerCAmelCase__ , lowerCAmelCase__ : str = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
lowerCAmelCase__ : Union[str, Any] = head_logprob[:, : self.cutoffs[0]]
else:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = weights[i], biases[i], self.out_projs[i]
lowerCAmelCase__ : Dict = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : List[str] = nn.functional.log_softmax(a , dim=1 )
lowerCAmelCase__ : Dict = head_logprob[:, -i] + tail_logprob_i
lowerCAmelCase__ : List[str] = logprob_i
            return out
| 307 | 0 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device=None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
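# The step_pred update above is one reverse-time Euler-Maruyama step (sketch of the
# discretization; notation assumed):
#
#   x_{t+dt} = x + [f(x, t) - g(t)^2 * score] * dt + g(t) * sqrt(|dt|) * z,   z ~ N(0, I)
#
# with drift f(x, t) = -0.5 * beta(t) * x and diffusion g(t) = sqrt(beta(t)) for this
# variance-preserving SDE, and dt = -1 / num_inference_steps because sampling runs
# backwards in time.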
| 5 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
    args = parser.parse_args()
main(args)
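# Example invocation (the script name and dataset values are illustrative; the
# flags match the argparse definitions above):
#
#   python eval.py --model_id facebook/wav2vec2-base-960h \
#       --dataset mozilla-foundation/common_voice_8_0 --config en --split test \
#       --chunk_length_s 5.0 --stride_length_s 1.0 --log_outputs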
| 59 | 0 |
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
lowercase : Any = logging.get_logger(__name__)
# General docstring
lowercase : Optional[Any] = "ResNetConfig"
# Base docstring
lowercase : Any = "microsoft/resnet-50"
lowercase : str = [1, 2048, 7, 7]
# Image classification docstring
lowercase : List[str] = "microsoft/resnet-50"
lowercase : List[Any] = "tiger cat"
lowercase : Tuple = [
"microsoft/resnet-50",
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class __UpperCAmelCase ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 3 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "relu" ):
"""simple docstring"""
super().__init__()
_snake_case = nn.Convad(
lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=lowerCAmelCase_ , stride=lowerCAmelCase_ , padding=kernel_size // 2 , bias=lowerCAmelCase_ )
_snake_case = nn.BatchNormad(lowerCAmelCase_ )
_snake_case = ACTaFN[activation] if activation is not None else nn.Identity()
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = self.convolution(lowerCAmelCase_ )
_snake_case = self.normalization(lowerCAmelCase_ )
_snake_case = self.activation(lowerCAmelCase_ )
return hidden_state
class __UpperCAmelCase ( nn.Module ):
def __init__( self , lowerCAmelCase_ ):
"""simple docstring"""
super().__init__()
_snake_case = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
_snake_case = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
_snake_case = config.num_channels
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
_snake_case = self.embedder(lowerCAmelCase_ )
_snake_case = self.pooler(lowerCAmelCase_ )
return embedding
class __UpperCAmelCase ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 2 ):
"""simple docstring"""
super().__init__()
_snake_case = nn.Convad(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 , stride=lowerCAmelCase_ , bias=lowerCAmelCase_ )
_snake_case = nn.BatchNormad(lowerCAmelCase_ )
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = self.convolution(lowerCAmelCase_ )
_snake_case = self.normalization(lowerCAmelCase_ )
return hidden_state
class __UpperCAmelCase ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "relu" ):
"""simple docstring"""
super().__init__()
_snake_case = in_channels != out_channels or stride != 1
_snake_case = (
ResNetShortCut(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) if should_apply_shortcut else nn.Identity()
)
_snake_case = nn.Sequential(
ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) , ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , activation=lowerCAmelCase_ ) , )
_snake_case = ACTaFN[activation]
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = hidden_state
_snake_case = self.layer(lowerCAmelCase_ )
_snake_case = self.shortcut(lowerCAmelCase_ )
hidden_state += residual
_snake_case = self.activation(lowerCAmelCase_ )
return hidden_state
class __UpperCAmelCase ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "relu" , lowerCAmelCase_ = 4 ):
"""simple docstring"""
super().__init__()
_snake_case = in_channels != out_channels or stride != 1
_snake_case = out_channels // reduction
_snake_case = (
ResNetShortCut(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) if should_apply_shortcut else nn.Identity()
)
_snake_case = nn.Sequential(
ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 ) , ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) , ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 , activation=lowerCAmelCase_ ) , )
_snake_case = ACTaFN[activation]
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = hidden_state
_snake_case = self.layer(lowerCAmelCase_ )
_snake_case = self.shortcut(lowerCAmelCase_ )
hidden_state += residual
_snake_case = self.activation(lowerCAmelCase_ )
return hidden_state
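# Shape walkthrough for the bottleneck block above (values assumed; in the upstream
# source this class is ResNetBottleNeckLayer, the name the encoder below selects
# for config.layer_type == "bottleneck"): with in_channels=64, out_channels=256 and
# reduction=4 the block runs 1x1 conv 64 -> 64, 3x3 conv 64 -> 64 (carrying the
# stride), then 1x1 conv 64 -> 256, and the 1x1 shortcut projection is applied only
# when channels or stride change:
#
#   layer = ResNetBottleNeckLayer(in_channels=64, out_channels=256, stride=1)
#   layer(torch.randn(1, 64, 56, 56)).shape  # -> torch.Size([1, 256, 56, 56])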
class __UpperCAmelCase ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 2 , ):
"""simple docstring"""
super().__init__()
_snake_case = ResNetBottleNeckLayer if config.layer_type == """bottleneck""" else ResNetBasicLayer
_snake_case = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ , activation=config.hidden_act ) , *[layer(lowerCAmelCase_ , lowerCAmelCase_ , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = input
for layer in self.layers:
_snake_case = layer(lowerCAmelCase_ )
return hidden_state
class __UpperCAmelCase ( nn.Module ):
def __init__( self , lowerCAmelCase_ ):
"""simple docstring"""
super().__init__()
_snake_case = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
lowerCAmelCase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
_snake_case = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowerCAmelCase_ , config.depths[1:] ):
self.stages.append(ResNetStage(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , depth=lowerCAmelCase_ ) )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = False , lowerCAmelCase_ = True ):
"""simple docstring"""
_snake_case = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_snake_case = hidden_states + (hidden_state,)
_snake_case = stage_module(lowerCAmelCase_ )
if output_hidden_states:
_snake_case = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=lowerCAmelCase_ , hidden_states=lowerCAmelCase_ , )
class __UpperCAmelCase ( __UpperCamelCase ):
__lowercase = ResNetConfig
__lowercase = """resnet"""
__lowercase = """pixel_values"""
__lowercase = True
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
if isinstance(lowerCAmelCase_ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' )
elif isinstance(lowerCAmelCase_ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=False ):
"""simple docstring"""
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = value
lowercase : List[str] = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
lowercase : Dict = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"""The bare ResNet model outputting raw features without any specific head on top.""" , __UpperCamelCase , )
class __UpperCAmelCase ( __UpperCamelCase ):
def __init__( self , lowerCAmelCase_ ):
"""simple docstring"""
super().__init__(lowerCAmelCase_ )
_snake_case = config
_snake_case = ResNetEmbeddings(lowerCAmelCase_ )
_snake_case = ResNetEncoder(lowerCAmelCase_ )
_snake_case = nn.AdaptiveAvgPool2d((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None ):
"""simple docstring"""
_snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = self.embedder(lowerCAmelCase_ )
_snake_case = self.encoder(
lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ )
_snake_case = encoder_outputs[0]
_snake_case = self.pooler(lowerCAmelCase_ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCAmelCase_ , pooler_output=lowerCAmelCase_ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"""
ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""" , __UpperCamelCase , )
class __UpperCAmelCase ( __UpperCamelCase ):
def __init__( self , lowerCAmelCase_ ):
"""simple docstring"""
super().__init__(lowerCAmelCase_ )
_snake_case = config.num_labels
_snake_case = ResNetModel(lowerCAmelCase_ )
# classification head
_snake_case = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCamelCase ( self , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , ):
"""simple docstring"""
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = self.resnet(lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ )
_snake_case = outputs.pooler_output if return_dict else outputs[1]
_snake_case = self.classifier(lowerCAmelCase_ )
_snake_case = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_snake_case = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_snake_case = """single_label_classification"""
else:
_snake_case = """multi_label_classification"""
if self.config.problem_type == "regression":
_snake_case = MSELoss()
if self.num_labels == 1:
_snake_case = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_snake_case = loss_fct(lowerCAmelCase_ , lowerCAmelCase_ )
elif self.config.problem_type == "single_label_classification":
_snake_case = CrossEntropyLoss()
_snake_case = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_snake_case = BCEWithLogitsLoss()
_snake_case = loss_fct(lowerCAmelCase_ , lowerCAmelCase_ )
if not return_dict:
_snake_case = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowerCAmelCase_ , logits=lowerCAmelCase_ , hidden_states=outputs.hidden_states )
@add_start_docstrings(
"""
ResNet backbone, to be used with frameworks like DETR and MaskFormer.
""" , __UpperCamelCase , )
class __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
def __init__( self , lowerCAmelCase_ ):
"""simple docstring"""
super().__init__(lowerCAmelCase_ )
super()._init_backbone(lowerCAmelCase_ )
_snake_case = [config.embedding_size] + config.hidden_sizes
_snake_case = ResNetEmbeddings(lowerCAmelCase_ )
_snake_case = ResNetEncoder(lowerCAmelCase_ )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase_ )
@replace_return_docstrings(output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None ):
"""simple docstring"""
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_snake_case = self.embedder(lowerCAmelCase_ )
_snake_case = self.encoder(lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ )
_snake_case = outputs.hidden_states
_snake_case = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
_snake_case = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=lowerCAmelCase_ , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=lowerCAmelCase_ , )
| 356 |
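A hedged usage sketch for the ResNet classes above, assuming the public transformers API (AutoImageProcessor, ResNetForImageClassification) and the microsoft/resnet-50 checkpoint; the all-zeros image is only a placeholder input.

import numpy as np
import torch
from PIL import Image
from transformers import AutoImageProcessor, ResNetForImageClassification

processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")

image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))  # placeholder image
inputs = processor(images=image, return_tensors="pt")  # pixel_values: (1, 3, 224, 224)
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[int(logits.argmax(-1))])  # predicted ImageNet label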
'''simple docstring'''
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class __UpperCAmelCase :
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
if dst_width < 1 or dst_height < 1:
raise ValueError('Destination width/height should be > 0' )
_snake_case = img
_snake_case = img.shape[1]
_snake_case = img.shape[0]
_snake_case = dst_width
_snake_case = dst_height
_snake_case = self.src_w / self.dst_w
_snake_case = self.src_h / self.dst_h
_snake_case = _snake_case = (
np.ones((self.dst_h, self.dst_w, 3) , np.uint8 ) * 2_55
)
def lowerCamelCase ( self ):
"""simple docstring"""
for i in range(self.dst_h ):
for j in range(self.dst_w ):
_snake_case = self.img[self.get_y(lowerCAmelCase_ )][self.get_x(lowerCAmelCase_ )]
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
return int(self.ratio_x * x )
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
return int(self.ratio_y * y )
if __name__ == "__main__":
lowercase , lowercase : Optional[Any] = 800, 600
lowercase : Tuple = imread("image_data/lena.jpg", 1)
lowercase : Any = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output
)
waitKey(0)
destroyAllWindows()
| 160 | 0 |
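The class above resizes by mapping each destination pixel back to int(ratio * index) in the source image. A minimal vectorized sketch of the same idea (illustrative names, not the class above):

import numpy as np

def nearest_neighbour_resize(img: np.ndarray, dst_w: int, dst_h: int) -> np.ndarray:
    # Map each destination pixel to its nearest source pixel:
    # src_index = int(dst_index * src_size / dst_size)
    src_h, src_w = img.shape[:2]
    ys = (np.arange(dst_h) * src_h / dst_h).astype(int)
    xs = (np.arange(dst_w) * src_w / dst_w).astype(int)
    return img[ys[:, None], xs[None, :]]

small = nearest_neighbour_resize(np.arange(16).reshape(4, 4), 2, 2)
print(small)  # [[0 2] [8 10]] -- every other row/column of the 4x4 input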
'''simple docstring'''
__A =8.3_1_4_4_6_2 # Unit - J mol-1 K-1
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError("""Invalid inputs. Enter positive value.""" )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError("""Invalid inputs. Enter positive value.""" )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
| 163 |
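Both helpers are rearrangements of the ideal gas law PV = nRT. A worked example: 1 mol at 300 K confined to 0.0224 m^3 exerts

R = 8.314462  # J mol^-1 K^-1
pressure = 1.0 * 300.0 * R / 0.0224
print(round(pressure))  # 111354 Pa, roughly 1.1 atm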
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
lowerCAmelCase__ = {
'''configuration_gpt_neox_japanese''': ['''GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXJapaneseConfig'''],
'''tokenization_gpt_neox_japanese''': ['''GPTNeoXJapaneseTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXJapaneseForCausalLM''',
'''GPTNeoXJapaneseLayer''',
'''GPTNeoXJapaneseModel''',
'''GPTNeoXJapanesePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 104 | 0 |
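The _LazyModule pattern above defers heavy imports until a symbol is first touched. A bare-bones sketch of the same idea using module-level __getattr__ (PEP 562); the structure below is illustrative and assumes it lives inside a package:

import importlib

_import_structure = {"tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"]}

def __getattr__(name):
    # Only import the submodule the first time one of its symbols is requested.
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")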
'''simple docstring'''
import numpy as np
def a__ ( _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : float ) -> np.ndarray:
"""simple docstring"""
return np.where(vector > 0 , _SCREAMING_SNAKE_CASE , (alpha * (np.exp(_SCREAMING_SNAKE_CASE ) - 1)) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 67 |
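The function above is the ELU activation: identity for positive inputs, alpha * (exp(x) - 1) otherwise. A quick check:

import numpy as np

v = np.array([-2.0, -0.5, 0.0, 1.5])
alpha = 1.0
print(np.where(v > 0, v, alpha * (np.exp(v) - 1)))
# approx [-0.8647, -0.3935, 0.0, 1.5]; negative inputs saturate towards -alpha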
'''simple docstring'''
import os
from pathlib import Path
def a__ ( ) -> Union[str, Any]:
"""simple docstring"""
from torch.utils.cpp_extension import load
UpperCAmelCase_ : Union[str, Any] = Path(_SCREAMING_SNAKE_CASE ).resolve().parent.parent.parent / "kernels" / "deformable_detr"
UpperCAmelCase_ : Any = [
root / filename
for filename in [
"vision.cpp",
os.path.join("cpu" , "ms_deform_attn_cpu.cpp" ),
os.path.join("cuda" , "ms_deform_attn_cuda.cu" ),
]
]
load(
"MultiScaleDeformableAttention" , _SCREAMING_SNAKE_CASE , with_cuda=_SCREAMING_SNAKE_CASE , extra_include_paths=[str(_SCREAMING_SNAKE_CASE )] , extra_cflags=["-DWITH_CUDA=1"] , extra_cuda_cflags=[
"-DCUDA_HAS_FP16=1",
"-D__CUDA_NO_HALF_OPERATORS__",
"-D__CUDA_NO_HALF_CONVERSIONS__",
"-D__CUDA_NO_HALF2_OPERATORS__",
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
| 67 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'facebook/convnextv2-tiny-1k-224': 'https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json',
}
class __lowerCAmelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
_a = """convnextv2"""
def __init__( self , lowerCAmelCase=3 , lowerCAmelCase=4 , lowerCAmelCase=4 , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase="gelu" , lowerCAmelCase=0.02 , lowerCAmelCase=1e-12 , lowerCAmelCase=0.0 , lowerCAmelCase=224 , lowerCAmelCase=None , lowerCAmelCase=None , **lowerCAmelCase , ) -> str:
'''simple docstring'''
super().__init__(**lowerCAmelCase )
_lowercase =num_channels
_lowercase =patch_size
_lowercase =num_stages
_lowercase =[96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
_lowercase =[3, 3, 9, 3] if depths is None else depths
_lowercase =hidden_act
_lowercase =initializer_range
_lowercase =layer_norm_eps
_lowercase =drop_path_rate
_lowercase =image_size
_lowercase =['stem'] + [F'''stage{idx}''' for idx in range(1 , len(self.depths ) + 1 )]
_lowercase , _lowercase =get_aligned_output_features_output_indices(
out_features=lowerCAmelCase , out_indices=lowerCAmelCase , stage_names=self.stage_names )
| 205 |
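A hedged instantiation sketch using the public class name (ConvNextV2Config in transformers); the values mirror the defaults in the __init__ above, and out_features is aligned against stage_names by get_aligned_output_features_output_indices:

from transformers import ConvNextV2Config

config = ConvNextV2Config(
    num_channels=3,
    depths=[3, 3, 9, 3],
    hidden_sizes=[96, 192, 384, 768],
    out_features=["stage1", "stage4"],
)
print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']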
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def a ( A__ : bool = True , *A__ : int , **A__ : Union[str, Any] ) -> List[str]:
"""simple docstring"""
if not is_tqdm_available():
raise ImportError('Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.' )
_lowercase =False
if main_process_only:
_lowercase =PartialState().local_process_index == 0
return _tqdm(*A__ , **A__ , disable=A__ )
| 205 | 1 |
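Usage sketch for the wrapper above (assuming it is exported as accelerate.utils.tqdm): the leading bool is main_process_only, so only the local main process renders a progress bar.

from accelerate.utils import tqdm  # assumed export path for the wrapper above

for _ in tqdm(True, range(1000)):  # True -> bar only on the local main process
    pass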
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
a__: Optional[int] = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
a__: Optional[int] = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
a__: Optional[int] = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
def UpperCamelCase ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,homepage='''https://github.com/krishnap25/mauve''',inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(
{
'''predictions''': datasets.Value('''string''',id='''sequence''' ),
'''references''': datasets.Value('''string''',id='''sequence''' ),
} ),codebase_urls=['''https://github.com/krishnap25/mauve'''],reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
],)
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase=None,__lowerCamelCase=None,__lowerCamelCase=None,__lowerCamelCase=None,__lowerCamelCase="auto",__lowerCamelCase=-1,__lowerCamelCase=0.9,__lowerCamelCase=5,__lowerCamelCase=500,__lowerCamelCase="gpt2-large",__lowerCamelCase=-1,__lowerCamelCase=1024,__lowerCamelCase=25,__lowerCamelCase=5,__lowerCamelCase=True,__lowerCamelCase=25,):
A__ = compute_mauve(
p_text=__lowerCamelCase,q_text=__lowerCamelCase,p_features=__lowerCamelCase,q_features=__lowerCamelCase,p_tokens=__lowerCamelCase,q_tokens=__lowerCamelCase,num_buckets=__lowerCamelCase,pca_max_data=__lowerCamelCase,kmeans_explained_var=__lowerCamelCase,kmeans_num_redo=__lowerCamelCase,kmeans_max_iter=__lowerCamelCase,featurize_model_name=__lowerCamelCase,device_id=__lowerCamelCase,max_text_length=__lowerCamelCase,divergence_curve_discretization_size=__lowerCamelCase,mauve_scaling_factor=__lowerCamelCase,verbose=__lowerCamelCase,seed=__lowerCamelCase,)
return out
| 39 |
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = '''MCTCTFeatureExtractor'''
__SCREAMING_SNAKE_CASE = '''AutoTokenizer'''
def __init__( self,__lowerCamelCase,__lowerCamelCase ):
super().__init__(__lowerCamelCase,__lowerCamelCase )
A__ = self.feature_extractor
A__ = False
def __call__( self,*__lowerCamelCase,**__lowerCamelCase ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__lowerCamelCase,**__lowerCamelCase )
if "raw_speech" in kwargs:
warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
A__ = kwargs.pop('''raw_speech''' )
else:
A__ = kwargs.pop('''audio''',__lowerCamelCase )
A__ = kwargs.pop('''sampling_rate''',__lowerCamelCase )
A__ = kwargs.pop('''text''',__lowerCamelCase )
if len(__lowerCamelCase ) > 0:
A__ = args[0]
A__ = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if audio is not None:
A__ = self.feature_extractor(__lowerCamelCase,*__lowerCamelCase,sampling_rate=__lowerCamelCase,**__lowerCamelCase )
if text is not None:
A__ = self.tokenizer(__lowerCamelCase,**__lowerCamelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
A__ = encodings['''input_ids''']
return inputs
def UpperCamelCase ( self,*__lowerCamelCase,**__lowerCamelCase ):
return self.tokenizer.batch_decode(*__lowerCamelCase,**__lowerCamelCase )
def UpperCamelCase ( self,*__lowerCamelCase,**__lowerCamelCase ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*__lowerCamelCase,**__lowerCamelCase )
A__ = kwargs.pop('''input_features''',__lowerCamelCase )
A__ = kwargs.pop('''labels''',__lowerCamelCase )
if len(__lowerCamelCase ) > 0:
A__ = args[0]
A__ = args[1:]
if input_features is not None:
A__ = self.feature_extractor.pad(__lowerCamelCase,*__lowerCamelCase,**__lowerCamelCase )
if labels is not None:
A__ = self.tokenizer.pad(__lowerCamelCase,**__lowerCamelCase )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
A__ = labels['''input_ids''']
return input_features
def UpperCamelCase ( self,*__lowerCamelCase,**__lowerCamelCase ):
return self.tokenizer.decode(*__lowerCamelCase,**__lowerCamelCase )
@contextmanager
def UpperCamelCase ( self ):
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your audio inputs, or in a separate call.''' )
A__ = True
A__ = self.tokenizer
yield
A__ = self.feature_extractor
A__ = False
| 39 | 1 |
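A hedged call sketch for the processor above: audio is routed to the feature extractor, text to the tokenizer, and the text ids are attached as labels. The class and checkpoint names below are the assumed public counterparts of the obfuscated code above.

import numpy as np
from transformers import MCTCTProcessor  # assumed public counterpart

processor = MCTCTProcessor.from_pretrained("speechbrain/m-ctc-t-large")
speech = np.zeros(16000, dtype=np.float32)  # 1 s of silence at 16 kHz
batch = processor(audio=speech, sampling_rate=16000, text="hello world")
print(batch.keys())  # feature-extractor outputs plus batch["labels"] from the text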
import math
def _A ( SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(SCREAMING_SNAKE_CASE ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def _A ( SCREAMING_SNAKE_CASE : int = 10_001 ):
"""simple docstring"""
try:
a__ : Optional[int] =int(SCREAMING_SNAKE_CASE )
except (TypeError, ValueError):
raise TypeError("Parameter nth must be int or castable to int." ) from None
if nth <= 0:
raise ValueError("Parameter nth must be greater than or equal to one." )
a__ : list[int] =[]
a__ : int =2
while len(SCREAMING_SNAKE_CASE ) < nth:
if is_prime(SCREAMING_SNAKE_CASE ):
primes.append(SCREAMING_SNAKE_CASE )
num += 1
else:
num += 1
return primes[len(SCREAMING_SNAKE_CASE ) - 1]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 95 |
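Under the conventional names the snippet's own calls assume (is_prime, solution), a quick check; the 6k +/- 1 stride works because every prime above 3 leaves remainder 1 or 5 modulo 6.

print([n for n in range(2, 30) if is_prime(n)])  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
print(solution(6))  # 13, the sixth prime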
from typing import Dict
from .base import GenericTensor, Pipeline
class _lowerCamelCase ( UpperCamelCase ):
"""simple docstring"""
def _snake_case ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE )->List[Any]:
'''simple docstring'''
if tokenize_kwargs is None:
A_ : Optional[int] = {}
if truncation is not None:
if "truncation" in tokenize_kwargs:
raise ValueError(
'''truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)''' )
A_ : List[str] = truncation
A_ : str = tokenize_kwargs
A_ : Optional[Any] = {}
if return_tensors is not None:
A_ : Union[str, Any] = return_tensors
return preprocess_params, {}, postprocess_params
def _snake_case ( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )->Dict[str, GenericTensor]:
'''simple docstring'''
A_ : str = self.framework
A_ : Any = self.tokenizer(_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
return model_inputs
def _snake_case ( self , _SCREAMING_SNAKE_CASE )->Optional[Any]:
'''simple docstring'''
A_ : Optional[int] = self.model(**_SCREAMING_SNAKE_CASE )
return model_outputs
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False )->List[Any]:
'''simple docstring'''
if return_tensors:
return model_outputs[0]
if self.framework == "pt":
return model_outputs[0].tolist()
elif self.framework == "tf":
return model_outputs[0].numpy().tolist()
def __call__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )->Union[str, Any]:
'''simple docstring'''
return super().__call__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
| 186 | 0 |
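The class above backs the "feature-extraction" pipeline; a typical call, assuming the usual transformers API and an illustrative checkpoint:

from transformers import pipeline

extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
features = extractor("Hello world")
print(len(features[0]), len(features[0][0]))  # num_tokens, hidden_size (e.g. 4, 768)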
"""simple docstring"""
import os
def _a ( ) -> int:
'''simple docstring'''
with open(os.path.dirname(_UpperCAmelCase ) + "/p022_names.txt" ) as file:
SCREAMING_SNAKE_CASE__ : Optional[Any] = str(file.readlines()[0] )
SCREAMING_SNAKE_CASE__ : int = names.replace("\"" , "" ).split("," )
names.sort()
SCREAMING_SNAKE_CASE__ : Optional[int] = 0
SCREAMING_SNAKE_CASE__ : int = 0
for i, name in enumerate(_UpperCAmelCase ):
for letter in name:
name_score += ord(_UpperCAmelCase ) - 64
total_score += (i + 1) * name_score
SCREAMING_SNAKE_CASE__ : Dict = 0
return total_score
if __name__ == "__main__":
print(solution())
| 350 |
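A worked example from the Project Euler problem statement: COLIN is worth 3 + 15 + 12 + 9 + 14 = 53, and as the 938th name in the sorted list it contributes 938 * 53 = 49714 to the total.

print(sum(ord(c) - 64 for c in "COLIN"))  # 53
print(938 * 53)  # 49714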
from __future__ import annotations
def _a ( SCREAMING_SNAKE_CASE__ : list[float] , SCREAMING_SNAKE_CASE__ : list[float] ) -> float:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = sorted(numsa + numsa )
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Dict = divmod(len(SCREAMING_SNAKE_CASE__ ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCamelCase : List[str] = [float(x) for x in input('''Enter the elements of first array: ''').split()]
_lowerCamelCase : Any = [float(x) for x in input('''Enter the elements of second array: ''').split()]
print(f"The median of two arrays is: {median_of_two_arrays(array_a, array_a)}")
| 191 | 0 |
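Quick checks for the helper above (under its intended name), covering odd and even combined lengths:

print(median_of_two_arrays([1.0, 3.0], [2.0]))  # 2.0 (middle of three values)
print(median_of_two_arrays([1.0, 2.0], [3.0, 4.0]))  # 2.5 (mean of the middle two)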
'''simple docstring'''
import random
from typing import Any
def _a( UpperCamelCase__ : list ):
'''simple docstring'''
for _ in range(len(lowercase__ ) ):
SCREAMING_SNAKE_CASE__ : List[Any] =random.randint(0, len(lowercase__ ) - 1 )
SCREAMING_SNAKE_CASE__ : List[str] =random.randint(0, len(lowercase__ ) - 1 )
SCREAMING_SNAKE_CASE__ : List[str] =data[b], data[a]
return data
if __name__ == "__main__":
a_ = [0, 1, 2, 3, 4, 5, 6, 7]
a_ = ['python', 'says', 'hello', '!']
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 152 |
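Note that the loop above swaps two independently random positions on each pass, which is a random-transposition shuffle rather than the textbook Fisher-Yates and is not guaranteed to produce a uniform permutation. A canonical (Durstenfeld) sketch:

import random

def fisher_yates_shuffle(data: list) -> list:
    # Walk from the end, swapping each position with a uniformly
    # chosen index at or below it; this yields a uniform permutation.
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)
        data[i], data[j] = data[j], data[i]
    return data

print(fisher_yates_shuffle([0, 1, 2, 3, 4, 5, 6, 7]))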
"""simple docstring"""
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowerCamelCase__ ( lowerCamelCase_ ):
def __init__( self , *SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
snake_case : Optional[Any] = eval_examples
snake_case : Any = post_process_function
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = "eval" , **SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
snake_case : Optional[int] = gen_kwargs.copy()
snake_case : Optional[int] = (
gen_kwargs["max_length"] if gen_kwargs.get("max_length" ) is not None else self.args.generation_max_length
)
snake_case : Any = (
gen_kwargs["num_beams"] if gen_kwargs.get("num_beams" ) is not None else self.args.generation_num_beams
)
snake_case : Optional[int] = gen_kwargs
snake_case : List[Any] = self.eval_dataset if eval_dataset is None else eval_dataset
snake_case : List[Any] = self.get_eval_dataloader(SCREAMING_SNAKE_CASE )
snake_case : Any = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
snake_case : List[str] = self.compute_metrics
snake_case : Tuple = None
snake_case : str = time.time()
snake_case : str = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
snake_case : List[Any] = eval_loop(
SCREAMING_SNAKE_CASE , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=SCREAMING_SNAKE_CASE , metric_key_prefix=SCREAMING_SNAKE_CASE , )
finally:
snake_case : List[str] = compute_metrics
snake_case : str = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
snake_case : Tuple = self.post_process_function(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
snake_case : List[Any] = self.compute_metrics(SCREAMING_SNAKE_CASE )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
snake_case : Any = metrics.pop(SCREAMING_SNAKE_CASE )
metrics.update(output.metrics )
else:
snake_case : List[str] = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(SCREAMING_SNAKE_CASE )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
snake_case : Any = self.callback_handler.on_evaluate(self.args , self.state , self.control , SCREAMING_SNAKE_CASE )
return metrics
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE = "test" , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
snake_case : Optional[int] = gen_kwargs.copy()
snake_case : int = self.get_test_dataloader(SCREAMING_SNAKE_CASE )
# Temporarily disable metric computation, we will do it in the loop here.
snake_case : Optional[int] = self.compute_metrics
snake_case : Dict = None
snake_case : int = time.time()
snake_case : Optional[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
snake_case : Dict = eval_loop(
SCREAMING_SNAKE_CASE , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=SCREAMING_SNAKE_CASE , metric_key_prefix=SCREAMING_SNAKE_CASE , )
finally:
snake_case : Optional[int] = compute_metrics
snake_case : Dict = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
snake_case : int = self.post_process_function(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , "predict" )
snake_case : Any = self.compute_metrics(SCREAMING_SNAKE_CASE )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
snake_case : List[str] = metrics.pop(SCREAMING_SNAKE_CASE )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=SCREAMING_SNAKE_CASE )
| 148 | 0 |
'''simple docstring'''
UpperCamelCase__ = 8.314_462 # Unit - J mol-1 K-1
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> float:
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError('''Invalid inputs. Enter positive value.''' )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> float:
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError('''Invalid inputs. Enter positive value.''' )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
| 299 |
'''simple docstring'''
import argparse
import torch
from transformers import (
SpeechT5Config,
SpeechT5FeatureExtractor,
SpeechT5ForSpeechToSpeech,
SpeechT5ForSpeechToText,
SpeechT5ForTextToSpeech,
SpeechT5Processor,
SpeechT5Tokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
UpperCamelCase__ = logging.get_logger('''transformers.models.speecht5''')
UpperCamelCase__ = {
'''speech_encoder_prenet.layer_norm''': '''speecht5.encoder.prenet.feature_projection.layer_norm''',
'''speech_encoder_prenet.post_extract_proj''': '''speecht5.encoder.prenet.feature_projection.projection''',
'''speech_encoder_prenet.pos_conv.0''': '''speecht5.encoder.prenet.pos_conv_embed.conv''',
'''speech_encoder_prenet.mask_emb''': '''speecht5.encoder.prenet.masked_spec_embed''',
}
UpperCamelCase__ = {
'''text_encoder_prenet.encoder_prenet.0''': '''speecht5.encoder.prenet.embed_tokens''',
'''text_encoder_prenet.encoder_prenet.1.alpha''': '''speecht5.encoder.prenet.encode_positions.alpha''',
}
UpperCamelCase__ = {
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0''': '''speecht5.decoder.prenet.layers.0''',
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0''': '''speecht5.decoder.prenet.layers.1''',
'''speech_decoder_prenet.decoder_prenet.0.1''': '''speecht5.decoder.prenet.final_layer''',
'''speech_decoder_prenet.decoder_prenet.1.alpha''': '''speecht5.decoder.prenet.encode_positions.alpha''',
'''speech_decoder_prenet.spkembs_layer.0''': '''speecht5.decoder.prenet.speaker_embeds_layer''',
}
UpperCamelCase__ = {
'''speech_decoder_postnet.feat_out''': '''speech_decoder_postnet.feat_out''',
'''speech_decoder_postnet.prob_out''': '''speech_decoder_postnet.prob_out''',
'''speech_decoder_postnet.postnet.postnet.0.0''': '''speech_decoder_postnet.layers.0.conv''',
'''speech_decoder_postnet.postnet.postnet.0.1''': '''speech_decoder_postnet.layers.0.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.1.0''': '''speech_decoder_postnet.layers.1.conv''',
'''speech_decoder_postnet.postnet.postnet.1.1''': '''speech_decoder_postnet.layers.1.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.2.0''': '''speech_decoder_postnet.layers.2.conv''',
'''speech_decoder_postnet.postnet.postnet.2.1''': '''speech_decoder_postnet.layers.2.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.3.0''': '''speech_decoder_postnet.layers.3.conv''',
'''speech_decoder_postnet.postnet.postnet.3.1''': '''speech_decoder_postnet.layers.3.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.4.0''': '''speech_decoder_postnet.layers.4.conv''',
'''speech_decoder_postnet.postnet.postnet.4.1''': '''speech_decoder_postnet.layers.4.batch_norm''',
}
UpperCamelCase__ = {
'''text_decoder_prenet.embed_tokens''': '''speecht5.decoder.prenet.embed_tokens''',
}
UpperCamelCase__ = {
'''text_decoder_postnet.output_projection''': '''text_decoder_postnet.lm_head''',
}
UpperCamelCase__ = {
'''encoder.layers.*.self_attn.k_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj''',
'''encoder.layers.*.self_attn.v_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj''',
'''encoder.layers.*.self_attn.q_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj''',
'''encoder.layers.*.self_attn.out_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj''',
'''encoder.layers.*.self_attn_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.layer_norm''',
'''encoder.layers.*.fc1''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense''',
'''encoder.layers.*.fc2''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense''',
'''encoder.layers.*.final_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''speecht5.encoder.wrapped_encoder.layer_norm''',
'''encoder.pos_emb.pe_k''': '''speecht5.encoder.wrapped_encoder.embed_positions.pe_k''',
}
UpperCamelCase__ = {
'''decoder.layers.*.self_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj''',
'''decoder.layers.*.self_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj''',
'''decoder.layers.*.self_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj''',
'''decoder.layers.*.self_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj''',
'''decoder.layers.*.self_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm''',
'''decoder.layers.*.encoder_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj''',
'''decoder.layers.*.encoder_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj''',
'''decoder.layers.*.encoder_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj''',
'''decoder.layers.*.encoder_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj''',
'''decoder.layers.*.encoder_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm''',
'''decoder.layers.*.fc1''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense''',
'''decoder.layers.*.fc2''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense''',
'''decoder.layers.*.final_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm''',
}
UpperCamelCase__ = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
UpperCamelCase__ = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
UpperCamelCase__ = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
UpperCamelCase__ = []
UpperCamelCase__ = [
'''encoder.version''',
'''encoder.layers.*.norm_k.weight''',
'''encoder.layers.*.norm_k.bias''',
'''decoder.version''',
'''decoder.layers.*.norm_k.weight''',
'''decoder.layers.*.norm_k.bias''',
'''decoder.pos_emb.pe_k''',
'''speech_encoder_prenet.embed_positions._float_tensor''',
'''text_decoder_prenet.embed_positions._float_tensor''',
]
UpperCamelCase__ = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''speech_decoder_prenet.*''',
'''speech_decoder_postnet.*''',
]
UpperCamelCase__ = IGNORE_KEYS + [
'''encoder.proj''',
'''speech_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
UpperCamelCase__ = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int:
for attribute in key.split('''.''' ):
UpperCAmelCase__ : Optional[int] = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
if weight_type is not None:
UpperCAmelCase__ : List[str] = getattr(lowerCAmelCase__ , lowerCAmelCase__ ).shape
else:
UpperCAmelCase__ : Any = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
UpperCAmelCase__ : Union[str, Any] = value
elif weight_type == "weight_g":
UpperCAmelCase__ : Tuple = value
elif weight_type == "weight_v":
UpperCAmelCase__ : List[Any] = value
elif weight_type == "bias":
UpperCAmelCase__ : int = value
elif weight_type == "running_mean":
UpperCAmelCase__ : int = value
elif weight_type == "running_var":
UpperCAmelCase__ : Union[str, Any] = value
elif weight_type == "num_batches_tracked":
UpperCAmelCase__ : List[Any] = value
else:
UpperCAmelCase__ : Union[str, Any] = value
logger.info(F"""{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.""" )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]:
for key in ignore_keys:
if key.endswith('''.*''' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
UpperCAmelCase__ , UpperCAmelCase__ : int = key.split('''.*.''' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]:
UpperCAmelCase__ : int = []
if task == "s2t":
UpperCAmelCase__ : Optional[Any] = hf_model.speecht5.encoder.prenet.feature_encoder
UpperCAmelCase__ : List[Any] = MAPPING_S2T
UpperCAmelCase__ : int = IGNORE_KEYS_S2T
elif task == "t2s":
UpperCAmelCase__ : List[str] = None
UpperCAmelCase__ : Tuple = MAPPING_T2S
UpperCAmelCase__ : Union[str, Any] = IGNORE_KEYS_T2S
elif task == "s2s":
UpperCAmelCase__ : Optional[int] = hf_model.speecht5.encoder.prenet.feature_encoder
UpperCAmelCase__ : Tuple = MAPPING_S2S
UpperCAmelCase__ : int = IGNORE_KEYS_S2S
else:
raise ValueError(F"""Unsupported task: {task}""" )
for name, value in fairseq_dict.items():
if should_ignore(lowerCAmelCase__ , lowerCAmelCase__ ):
logger.info(F"""{name} was ignored""" )
continue
UpperCAmelCase__ : List[Any] = False
if "conv_layers" in name:
load_conv_layer(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , hf_model.config.feat_extract_norm == '''group''' , )
UpperCAmelCase__ : Tuple = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = key.split('''.*.''' )
if prefix in name and suffix in name:
UpperCAmelCase__ : List[str] = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
UpperCAmelCase__ : Optional[int] = True
if "*" in mapped_key:
UpperCAmelCase__ : Any = name.split(lowerCAmelCase__ )[0].split('''.''' )[-2]
UpperCAmelCase__ : Union[str, Any] = mapped_key.replace('''*''' , lowerCAmelCase__ )
if "weight_g" in name:
UpperCAmelCase__ : Dict = '''weight_g'''
elif "weight_v" in name:
UpperCAmelCase__ : Union[str, Any] = '''weight_v'''
elif "bias" in name:
UpperCAmelCase__ : Optional[int] = '''bias'''
elif "weight" in name:
UpperCAmelCase__ : Optional[int] = '''weight'''
elif "running_mean" in name:
UpperCAmelCase__ : Optional[int] = '''running_mean'''
elif "running_var" in name:
UpperCAmelCase__ : List[Any] = '''running_var'''
elif "num_batches_tracked" in name:
UpperCAmelCase__ : Optional[Any] = '''num_batches_tracked'''
else:
UpperCAmelCase__ : Union[str, Any] = None
set_recursively(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
continue
if not is_used:
unused_weights.append(lowerCAmelCase__ )
logger.warning(F"""Unused weights: {unused_weights}""" )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
UpperCAmelCase__ : Optional[int] = full_name.split('''conv_layers.''' )[-1]
UpperCAmelCase__ : Optional[Any] = name.split('''.''' )
UpperCAmelCase__ : Any = int(items[0] )
UpperCAmelCase__ : Optional[int] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
UpperCAmelCase__ : Any = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
UpperCAmelCase__ : int = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
UpperCAmelCase__ : List[str] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
UpperCAmelCase__ : Union[str, Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(lowerCAmelCase__ )
@torch.no_grad()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , ) -> Any:
if config_path is not None:
UpperCAmelCase__ : Optional[Any] = SpeechT5Config.from_pretrained(lowerCAmelCase__ )
else:
UpperCAmelCase__ : str = SpeechT5Config()
if task == "s2t":
UpperCAmelCase__ : str = config.max_text_positions
UpperCAmelCase__ : List[str] = SpeechT5ForSpeechToText(lowerCAmelCase__ )
elif task == "t2s":
UpperCAmelCase__ : Tuple = 18_76
UpperCAmelCase__ : int = 6_00
UpperCAmelCase__ : Union[str, Any] = config.max_speech_positions
UpperCAmelCase__ : Optional[Any] = SpeechT5ForTextToSpeech(lowerCAmelCase__ )
elif task == "s2s":
UpperCAmelCase__ : Tuple = 18_76
UpperCAmelCase__ : Optional[Any] = config.max_speech_positions
UpperCAmelCase__ : Dict = SpeechT5ForSpeechToSpeech(lowerCAmelCase__ )
else:
raise ValueError(F"""Unknown task name: {task}""" )
if vocab_path:
UpperCAmelCase__ : Tuple = SpeechT5Tokenizer(lowerCAmelCase__ , model_max_length=config.max_text_positions )
# Mask token behaves like a normal word, i.e. include the space before it
UpperCAmelCase__ : Dict = AddedToken('''<mask>''' , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ )
UpperCAmelCase__ : int = mask_token
tokenizer.add_special_tokens({'''mask_token''': mask_token} )
tokenizer.add_tokens(['''<ctc_blank>'''] )
UpperCAmelCase__ : Optional[Any] = SpeechT5FeatureExtractor()
UpperCAmelCase__ : Any = SpeechT5Processor(tokenizer=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ )
processor.save_pretrained(lowerCAmelCase__ )
UpperCAmelCase__ : List[str] = torch.load(lowerCAmelCase__ )
recursively_load_weights(fairseq_checkpoint['''model'''] , lowerCAmelCase__ , lowerCAmelCase__ )
model.save_pretrained(lowerCAmelCase__ )
if repo_id:
print('''Pushing to the hub...''' )
processor.push_to_hub(lowerCAmelCase__ )
model.push_to_hub(lowerCAmelCase__ )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
'''--task''',
default='''s2t''',
type=str,
help='''Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--vocab_path''', default=None, type=str, help='''Path to SentencePiece model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
UpperCamelCase__ = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
| 299 | 1 |
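A hedged invocation sketch, mirroring the positional call in the __main__ block above (all paths are placeholders; arguments are passed positionally because the signature's keyword names are obfuscated):

convert_speechta_checkpoint(
    "s2t",  # task: one of "s2t", "t2s", "s2s"
    "/path/to/fairseq_checkpoint.pt",  # checkpoint_path
    "./speecht5_s2t",  # pytorch_dump_folder_path
    None,  # config_path: fall back to a default SpeechT5Config
    "/path/to/spm_char.model",  # vocab_path (SentencePiece model)
    None,  # hub repo id: set a string to push the converted model
)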
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_a : Optional[Any] = logging.get_logger(__name__)
_a : Optional[Any] = {
'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json',
'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json',
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json',
'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json',
'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json',
'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json',
'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json',
'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json',
'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json',
'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json',
'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json',
'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json',
}
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Union[str, Any] = "codegen"
_UpperCamelCase : Tuple = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , a__=50400 , a__=2048 , a__=2048 , a__=4096 , a__=28 , a__=16 , a__=64 , a__=None , a__="gelu_new" , a__=0.0 , a__=0.0 , a__=0.0 , a__=1e-5 , a__=0.0_2 , a__=True , a__=50256 , a__=50256 , a__=False , **a__ , ):
_lowerCAmelCase : Dict = vocab_size
_lowerCAmelCase : Optional[Any] = n_ctx
_lowerCAmelCase : Dict = n_positions
_lowerCAmelCase : Any = n_embd
_lowerCAmelCase : int = n_layer
_lowerCAmelCase : Any = n_head
_lowerCAmelCase : List[Any] = n_inner
_lowerCAmelCase : int = rotary_dim
_lowerCAmelCase : List[str] = activation_function
_lowerCAmelCase : List[str] = resid_pdrop
_lowerCAmelCase : Union[str, Any] = embd_pdrop
_lowerCAmelCase : List[str] = attn_pdrop
_lowerCAmelCase : Union[str, Any] = layer_norm_epsilon
_lowerCAmelCase : Any = initializer_range
_lowerCAmelCase : Union[str, Any] = use_cache
_lowerCAmelCase : Union[str, Any] = bos_token_id
_lowerCAmelCase : Optional[int] = eos_token_id
super().__init__(
bos_token_id=a__ , eos_token_id=a__ , tie_word_embeddings=a__ , **a__ )
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ , a__ = "default" , a__ = None , a__ = False , ):
super().__init__(a__ , task=a__ , patching_specs=a__ , use_past=a__ )
if not getattr(self._config , """pad_token_id""" , a__ ):
# TODO: how to do that better?
_lowerCAmelCase : List[str] = 0
@property
def __A ( self ):
_lowerCAmelCase : Tuple = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(a__ , direction="""inputs""" )
_lowerCAmelCase : str = {0: """batch""", 1: """past_sequence + sequence"""}
else:
_lowerCAmelCase : List[Any] = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def __A ( self ):
return self._config.n_layer
@property
def __A ( self ):
return self._config.n_head
def __A ( self , a__ , a__ = -1 , a__ = -1 , a__ = False , a__ = None , ):
_lowerCAmelCase : Any = super(a__ , self ).generate_dummy_inputs(
a__ , batch_size=a__ , seq_length=a__ , is_pair=a__ , framework=a__ )
# We need to order the inputs in the way they appear in the forward()
_lowerCAmelCase : Optional[int] = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
_lowerCAmelCase , _lowerCAmelCase : Tuple = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
_lowerCAmelCase : Any = seqlen + 2
_lowerCAmelCase : int = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
_lowerCAmelCase : Any = [
(torch.zeros(a__ ), torch.zeros(a__ )) for _ in range(self.num_layers )
]
_lowerCAmelCase : Union[str, Any] = common_inputs["""attention_mask"""]
if self.use_past:
_lowerCAmelCase : str = ordered_inputs["""attention_mask"""].dtype
_lowerCAmelCase : int = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(a__ , a__ , dtype=a__ )] , dim=1 )
return ordered_inputs
@property
def __A ( self ):
return 13
| 44 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
_a : Dict = datasets.utils.logging.get_logger(__name__)
@dataclass
class __A ( datasets.BuilderConfig ):
_UpperCamelCase : int = 10_000
_UpperCamelCase : Optional[List[str]] = None
_UpperCamelCase : Optional[datasets.Features] = None
class __A ( datasets.ArrowBasedBuilder ):
_UpperCamelCase : List[str] = ParquetConfig
def __A ( self ):
return datasets.DatasetInfo(features=self.config.features )
def __A ( self , a__ ):
if not self.config.data_files:
raise ValueError(F"At least one data file must be specified, but got data_files={self.config.data_files}" )
_lowerCAmelCase : Optional[Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(a__ , (str, list, tuple) ):
_lowerCAmelCase : Any = data_files
if isinstance(a__ , a__ ):
_lowerCAmelCase : Tuple = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase : Any = [dl_manager.iter_files(a__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
_lowerCAmelCase : Optional[Any] = []
for split_name, files in data_files.items():
if isinstance(a__ , a__ ):
_lowerCAmelCase : Dict = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase : Tuple = [dl_manager.iter_files(a__ ) for file in files]
# Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(a__ ):
with open(a__ , """rb""" ) as f:
_lowerCAmelCase : Optional[Any] = datasets.Features.from_arrow_schema(pq.read_schema(a__ ) )
break
splits.append(datasets.SplitGenerator(name=a__ , gen_kwargs={"""files""": files} ) )
return splits
def __A ( self , a__ ):
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
_lowerCAmelCase : Optional[int] = table_cast(a__ , self.info.features.arrow_schema )
return pa_table
def __A ( self , a__ ):
_lowerCAmelCase : Optional[int] = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'" )
for file_idx, file in enumerate(itertools.chain.from_iterable(a__ ) ):
with open(a__ , """rb""" ) as f:
_lowerCAmelCase : Tuple = pq.ParquetFile(a__ )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
_lowerCAmelCase : Any = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F"{file_idx}_{batch_idx}", self._cast_table(a__ )
except ValueError as e:
logger.error(F"Failed to read file '{file}' with error {type(a__ )}: {e}" )
raise
| 44 | 1 |
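The builder above is what load_dataset("parquet", ...) dispatches to; a minimal round trip, assuming pandas and datasets are installed:

import pandas as pd
from datasets import load_dataset

pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]}).to_parquet("demo.parquet")
ds = load_dataset("parquet", data_files="demo.parquet", split="train")
print(ds[0])  # {'a': 1, 'b': 'x'}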
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class lowerCamelCase_ :
lowerCAmelCase__ = None
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = True
lowerCAmelCase__ = None
lowerCAmelCase__ = 1
lowerCAmelCase__ = None
lowerCAmelCase__ = False
lowerCAmelCase__ = None
lowerCAmelCase__ = None
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
return self.__class__(**{k: copy.deepcopy(_A ) for k, v in self.__dict__.items()} )
| 299 |
'''simple docstring'''
from collections.abc import Iterable
from typing import Any
class lowerCamelCase_ :
def __init__( self : List[Any] , _A : int | None = None ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = value
UpperCAmelCase__ : Node | None = None # Added in order to delete a node easier
UpperCAmelCase__ : Node | None = None
UpperCAmelCase__ : Node | None = None
def __repr__( self : Optional[Any] ):
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({f"""{self.value}""": (self.left, self.right)} , indent=1 )
class lowerCamelCase_ :
def __init__( self : Optional[Any] , _A : Node | None = None ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = root
def __str__( self : Union[str, Any] ):
'''simple docstring'''
return str(self.root )
def lowercase_ ( self : str , _A : Node , _A : Node | None ):
'''simple docstring'''
if new_children is not None: # reset its kids
UpperCAmelCase__ : Dict = node.parent
if node.parent is not None: # reset its parent
if self.is_right(_A ): # If it is the right children
UpperCAmelCase__ : str = new_children
else:
UpperCAmelCase__ : Optional[int] = new_children
else:
UpperCAmelCase__ : Union[str, Any] = new_children
def lowercase_ ( self : Union[str, Any] , _A : Node ):
'''simple docstring'''
if node.parent and node.parent.right:
return node == node.parent.right
return False
def lowercase_ ( self : int ):
'''simple docstring'''
return self.root is None
def lowercase_ ( self : List[str] , _A : Any ):
'''simple docstring'''
UpperCAmelCase__ : Dict = Node(_A ) # create a new Node
if self.empty(): # if Tree is empty
UpperCAmelCase__ : List[Any] = new_node # set its root
else: # Tree is not empty
UpperCAmelCase__ : str = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
UpperCAmelCase__ : Optional[Any] = new_node # We insert the new node in a leaf
break
else:
UpperCAmelCase__ : Any = parent_node.left
else:
if parent_node.right is None:
UpperCAmelCase__ : str = new_node
break
else:
UpperCAmelCase__ : List[str] = parent_node.right
UpperCAmelCase__ : Tuple = parent_node
def lowercase_ ( self : Optional[Any] , *_A : Tuple ):
'''simple docstring'''
for value in values:
self.__insert(_A )
def lowercase_ ( self : Union[str, Any] , _A : int ):
'''simple docstring'''
if self.empty():
raise IndexError('''Warning: Tree is empty! Please insert values before searching.''' )
else:
UpperCAmelCase__ : List[Any] = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
UpperCAmelCase__ : str = node.left if value < node.value else node.right
return node
def lowercase_ ( self : List[Any] , _A : Node | None = None ):
'''simple docstring'''
if node is None:
if self.root is None:
return None
UpperCAmelCase__ : int = self.root
if not self.empty():
while node.right is not None:
UpperCAmelCase__ : Tuple = node.right
return node
def lowercase_ ( self : List[Any] , _A : Node | None = None ):
'''simple docstring'''
if node is None:
UpperCAmelCase__ : Optional[int] = self.root
if self.root is None:
return None
if not self.empty():
UpperCAmelCase__ : Optional[int] = self.root
while node.left is not None:
UpperCAmelCase__ : Tuple = node.left
return node
def lowercase_ ( self : List[Any] , _A : int ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.search(_A ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(_A , _A )
elif node.left is None: # Has only right children
self.__reassign_nodes(_A , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(_A , node.left )
else:
UpperCAmelCase__ : Union[str, Any] = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
UpperCAmelCase__ : Optional[Any] = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def lowercase_ ( self : List[str] , _A : Node | None ):
'''simple docstring'''
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def lowercase_ ( self : str , _A : Any=None ):
'''simple docstring'''
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def lowercase_ ( self : Dict , _A : list , _A : Node | None ):
'''simple docstring'''
if node:
self.inorder(_A , node.left )
arr.append(node.value )
self.inorder(_A , node.right )
def lowercase_ ( self : Optional[Any] , _A : int , _A : Node ):
'''simple docstring'''
UpperCAmelCase__ : list[int] = []
self.inorder(_A , _A ) # append all values to list using inorder traversal
return arr[k - 1]
def a__ ( lowerCAmelCase__ ) -> list[Node]:
UpperCAmelCase__ : Union[str, Any] = []
if curr_node is not None:
UpperCAmelCase__ : str = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def a__ ( ) -> None:
UpperCAmelCase__ : List[Any] = (8, 3, 6, 1, 10, 14, 13, 4, 7)
UpperCAmelCase__ : str = BinarySearchTree()
for i in testlist:
t.insert(lowerCAmelCase__ )
# Prints all the elements of the list in order traversal
print(lowerCAmelCase__ )
if t.search(6 ) is not None:
print('''The value 6 exists''' )
else:
print('''The value 6 doesn\'t exist''' )
if t.search(-1 ) is not None:
print('''The value -1 exists''' )
else:
print('''The value -1 doesn\'t exist''' )
if not t.empty():
print('''Max Value: ''' , t.get_max().value ) # type: ignore
print('''Min Value: ''' , t.get_min().value ) # type: ignore
for i in testlist:
t.remove(lowerCAmelCase__ )
print(lowerCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 299 | 1 |
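# A compact, runnable restatement of the insert/search core of the binary
# search tree above, free of the mangled identifiers. An illustrative sketch,
# not a drop-in replacement for the class in the row.
class BSTNode:
    def __init__(self, value):
        self.value = value
        self.left = None
        self.right = None

def bst_insert(root, value):
    # Insert `value` and return the (possibly new) root.
    if root is None:
        return BSTNode(value)
    if value < root.value:
        root.left = bst_insert(root.left, value)
    else:
        root.right = bst_insert(root.right, value)
    return root

def bst_search(root, value):
    # Walk down one branch per comparison; return the node or None.
    while root is not None and root.value != value:
        root = root.left if value < root.value else root.right
    return root

root = None
for v in (8, 3, 6, 1, 10, 14, 13, 4, 7):
    root = bst_insert(root, v)
assert bst_search(root, 6) is not None and bst_search(root, -1) is None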
'''simple docstring'''
import warnings
from .generation import TFGenerationMixin
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
warnings.warn(
"""Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will """
"""be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.""" , _lowerCamelCase , )
| 215 |
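# The row above is the standard import-shim deprecation pattern: keep the old
# import path working while warning at import time. A generic sketch; the class
# name, alias, and message are placeholders, not the real transformers API.
import warnings

class GenerationMixin:  # stand-in for the class that moved to a new module
    pass

warnings.warn(
    "Importing `GenerationMixin` from this module is deprecated and will be "
    "removed in a future release; import it from its new location instead.",
    FutureWarning,
)

LegacyGenerationMixin = GenerationMixin  # the legacy name stays importable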
'''simple docstring'''
class lowercase :
"""simple docstring"""
def __init__( self ) -> List[str]:
_UpperCAmelCase : int = 0
_UpperCAmelCase : Union[str, Any] = 0
_UpperCAmelCase : Optional[int] = {}
def _snake_case ( self ,a_ ) -> Optional[Any]:
if vertex not in self.adjacency:
_UpperCAmelCase : int = {}
self.num_vertices += 1
def _snake_case ( self ,a_ ,a_ ,a_ ) -> int:
self.add_vertex(a_ )
self.add_vertex(a_ )
if head == tail:
return
_UpperCAmelCase : List[Any] = weight
_UpperCAmelCase : Dict = weight
def _snake_case ( self ) -> Dict:
_UpperCAmelCase : Optional[int] = self.get_edges()
for edge in edges:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Dict = edge
edges.remove((tail, head, weight) )
for i in range(len(a_ ) ):
_UpperCAmelCase : str = list(edges[i] )
edges.sort(key=lambda a_ : e[2] )
for i in range(len(a_ ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
_UpperCAmelCase : Optional[Any] = edges[i][2] + 1
for edge in edges:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Union[str, Any] = edge
_UpperCAmelCase : str = weight
_UpperCAmelCase : List[str] = weight
def __str__( self ) -> Any:
_UpperCAmelCase : List[Any] = """"""
for tail in self.adjacency:
for head in self.adjacency[tail]:
_UpperCAmelCase : List[str] = self.adjacency[head][tail]
string += f'''{head} -> {tail} == {weight}\n'''
return string.rstrip("""\n""" )
def _snake_case ( self ) -> str:
_UpperCAmelCase : int = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def _snake_case ( self ) -> Optional[int]:
return self.adjacency.keys()
@staticmethod
def _snake_case ( a_=None ,a_=None ) -> Tuple:
_UpperCAmelCase : List[Any] = Graph()
if vertices is None:
_UpperCAmelCase : List[str] = []
if edges is None:
_UpperCAmelCase : Optional[Any] = []
for vertex in vertices:
g.add_vertex(a_ )
for edge in edges:
g.add_edge(*a_ )
return g
class lowercase :
"""simple docstring"""
def __init__( self ) -> int:
_UpperCAmelCase : List[str] = {}
_UpperCAmelCase : int = {}
def __len__( self ) -> Tuple:
return len(self.parent )
def _snake_case ( self ,a_ ) -> str:
if item in self.parent:
return self.find(a_ )
_UpperCAmelCase : Optional[Any] = item
_UpperCAmelCase : List[Any] = 0
return item
def _snake_case ( self ,a_ ) -> List[str]:
if item not in self.parent:
return self.make_set(a_ )
if item != self.parent[item]:
_UpperCAmelCase : List[Any] = self.find(self.parent[item] )
return self.parent[item]
def _snake_case ( self ,a_ ,a_ ) -> Union[str, Any]:
_UpperCAmelCase : Any = self.find(a_ )
_UpperCAmelCase : List[str] = self.find(a_ )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
_UpperCAmelCase : Any = roota
return roota
if self.rank[roota] < self.rank[roota]:
_UpperCAmelCase : Any = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
_UpperCAmelCase : List[str] = roota
return roota
return None
@staticmethod
def _snake_case ( a_ ) -> List[Any]:
_UpperCAmelCase : int = graph.num_vertices
_UpperCAmelCase : int = Graph.UnionFind()
_UpperCAmelCase : Optional[int] = []
while num_components > 1:
_UpperCAmelCase : int = {}
for vertex in graph.get_vertices():
_UpperCAmelCase : Union[str, Any] = -1
_UpperCAmelCase : Tuple = graph.get_edges()
for edge in edges:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : str = edge
edges.remove((tail, head, weight) )
for edge in edges:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Optional[Any] = edge
_UpperCAmelCase : Any = union_find.find(a_ )
_UpperCAmelCase : Any = union_find.find(a_ )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_UpperCAmelCase : Tuple = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_UpperCAmelCase : List[str] = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : str = cheap_edge[vertex]
if union_find.find(a_ ) != union_find.find(a_ ):
union_find.union(a_ ,a_ )
mst_edges.append(cheap_edge[vertex] )
_UpperCAmelCase : Tuple = num_components - 1
_UpperCAmelCase : Optional[int] = Graph.build(edges=a_ )
return mst
| 215 | 1 |
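# The UnionFind above mangles `root1`/`root2` into a single name, which hides
# the union-by-rank logic. A minimal, runnable restatement of the disjoint-set
# structure it implements (an illustrative sketch):
class UnionFind:
    def __init__(self):
        self.parent = {}
        self.rank = {}

    def find(self, item):
        if item not in self.parent:  # make-set on first sight
            self.parent[item] = item
            self.rank[item] = 0
        if self.parent[item] != item:  # path compression
            self.parent[item] = self.find(self.parent[item])
        return self.parent[item]

    def union(self, a, b):
        root_a, root_b = self.find(a), self.find(b)
        if root_a == root_b:
            return root_a
        if self.rank[root_a] < self.rank[root_b]:  # union by rank
            root_a, root_b = root_b, root_a
        if self.rank[root_a] == self.rank[root_b]:
            self.rank[root_a] += 1
        self.parent[root_b] = root_a
        return root_a

uf = UnionFind()
uf.union(1, 2)
uf.union(2, 3)
assert uf.find(1) == uf.find(3) and uf.find(1) != uf.find(4)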
"""simple docstring"""
from functools import reduce
snake_case__ : Optional[Any] = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def _snake_case ( _snake_case : str = N ):
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda _snake_case , _snake_case : str(int(_snake_case ) * int(_snake_case ) ) , n[i : i + 13] ) )
for i in range(len(_snake_case ) - 12 ) )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 314 |
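# A plainer restatement of the computation above (Project Euler 8: greatest
# product of adjacent digits). A short digit string keeps the sketch
# self-contained; the row applies the same idea to the 1000-digit constant
# with a window of 13.
from functools import reduce

def largest_adjacent_product(digits, window):
    return max(
        reduce(lambda acc, d: acc * int(d), digits[i : i + window], 1)
        for i in range(len(digits) - window + 1)
    )

assert largest_adjacent_product("3675356291", window=4) == 630  # 3 * 6 * 7 * 5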
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class snake_case_:
def __init__( self : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : int=sys.maxsize ):
lowerCAmelCase : Tuple = '''bilinear'''
lowerCAmelCase : List[Any] = max_size
lowerCAmelCase : Optional[int] = short_edge_length
def __call__( self : Optional[int] , UpperCamelCase_ : Optional[int] ):
lowerCAmelCase : Tuple = []
for img in imgs:
lowerCAmelCase, lowerCAmelCase : List[str] = img.shape[:2]
# later: provide list and randomly choose index for resize
lowerCAmelCase : int = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
lowerCAmelCase : Optional[Any] = size * 1.0 / min(UpperCamelCase_ , UpperCamelCase_ )
if h < w:
lowerCAmelCase, lowerCAmelCase : List[str] = size, scale * w
else:
lowerCAmelCase, lowerCAmelCase : int = scale * h, size
if max(UpperCamelCase_ , UpperCamelCase_ ) > self.max_size:
lowerCAmelCase : Union[str, Any] = self.max_size * 1.0 / max(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Tuple = newh * scale
lowerCAmelCase : str = neww * scale
lowerCAmelCase : Union[str, Any] = int(neww + 0.5 )
lowerCAmelCase : str = int(newh + 0.5 )
if img.dtype == np.uinta:
lowerCAmelCase : Tuple = Image.fromarray(UpperCamelCase_ )
lowerCAmelCase : Optional[int] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
lowerCAmelCase : Union[str, Any] = np.asarray(UpperCamelCase_ )
else:
lowerCAmelCase : List[str] = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # hwc -> nchw
lowerCAmelCase : Optional[int] = nn.functional.interpolate(
UpperCamelCase_ , (newh, neww) , mode=self.interp_method , align_corners=UpperCamelCase_ ).squeeze(0 )
img_augs.append(UpperCamelCase_ )
return img_augs
class snake_case_:
def __init__( self : Tuple , UpperCamelCase_ : Any ):
lowerCAmelCase : Any = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
lowerCAmelCase : List[Any] = cfg.INPUT.FORMAT
lowerCAmelCase : Tuple = cfg.SIZE_DIVISIBILITY
lowerCAmelCase : int = cfg.PAD_VALUE
lowerCAmelCase : Union[str, Any] = cfg.INPUT.MAX_SIZE_TEST
lowerCAmelCase : Union[str, Any] = cfg.MODEL.DEVICE
lowerCAmelCase : Union[str, Any] = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
lowerCAmelCase : List[Any] = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
lowerCAmelCase : Optional[int] = lambda UpperCamelCase_ : (x - self.pixel_mean) / self.pixel_std
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : List[Any] ):
lowerCAmelCase : Dict = tuple(max(UpperCamelCase_ ) for s in zip(*[img.shape for img in images] ) )
lowerCAmelCase : Dict = [im.shape[-2:] for im in images]
lowerCAmelCase : Dict = [
nn.functional.pad(
UpperCamelCase_ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(UpperCamelCase_ , UpperCamelCase_ )
]
return torch.stack(UpperCamelCase_ ), torch.tensor(UpperCamelCase_ )
def __call__( self : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[int]=False ):
with torch.no_grad():
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowerCAmelCase : List[Any] = [images]
if single_image:
assert len(UpperCamelCase_ ) == 1
for i in range(len(UpperCamelCase_ ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(UpperCamelCase_ , images.pop(UpperCamelCase_ ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
UpperCamelCase_ , torch.as_tensor(img_tensorize(images.pop(UpperCamelCase_ ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
lowerCAmelCase : Dict = torch.tensor([im.shape[:2] for im in images] )
lowerCAmelCase : str = self.aug(UpperCamelCase_ )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
lowerCAmelCase : int = [self.normalizer(UpperCamelCase_ ) for x in images]
# now pad them to do the following operations
lowerCAmelCase, lowerCAmelCase : Optional[Any] = self.pad(UpperCamelCase_ )
# enforce size divisibility if required (not implemented here)
if self.size_divisibility > 0:
raise NotImplementedError()
# compute per-image (y, x) rescale factors
lowerCAmelCase : Union[str, Any] = torch.true_divide(UpperCamelCase_ , UpperCamelCase_ )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def _snake_case ( _snake_case : str , _snake_case : List[Any] ):
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def _snake_case ( _snake_case : Any , _snake_case : Tuple[int, int] ):
assert torch.isfinite(_snake_case ).all(), "Box tensor contains infinite or NaN!"
lowerCAmelCase, lowerCAmelCase : Optional[int] = box_size
tensor[:, 0].clamp_(min=0 , max=_snake_case )
tensor[:, 1].clamp_(min=0 , max=_snake_case )
tensor[:, 2].clamp_(min=0 , max=_snake_case )
tensor[:, 3].clamp_(min=0 , max=_snake_case )
| 314 | 1 |
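# The core arithmetic of ResizeShortestEdge above, extracted into a pure
# function: scale so the short side reaches `size`, then rescale again if the
# long side would exceed `max_size`. An illustrative sketch of the same math.
def resize_shortest_edge(h, w, size, max_size):
    scale = size / min(h, w)
    if h < w:
        newh, neww = size, scale * w
    else:
        newh, neww = scale * h, size
    if max(newh, neww) > max_size:
        rescale = max_size / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    return int(newh + 0.5), int(neww + 0.5)  # round to the nearest pixel

assert resize_shortest_edge(480, 640, size=256, max_size=1000) == (256, 341)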
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , ):
__a = parent
__a = 13
__a = 7
__a = True
__a = True
__a = True
__a = 99
__a = 32
__a = 2
__a = 4
__a = 37
__a = '''gelu'''
__a = 0.1
__a = 0.1
__a = 512
__a = 16
__a = 2
__a = 0.02
__a = 3
__a = 4
__a = None
def __UpperCAmelCase ( self ):
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = None
if self.use_input_mask:
__a = random_attention_mask([self.batch_size, self.seq_length] )
__a = None
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a = ids_tensor([self.batch_size] , self.num_choices )
__a = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self ):
(
(
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) ,
) = self.prepare_config_and_inputs()
__a = True
__a = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a ):
__a = TFEsmModel(config=_a )
__a = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__a = model(_a )
__a = [input_ids, input_mask]
__a = model(_a )
__a = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a , _a , ):
__a = True
__a = TFEsmModel(config=_a )
__a = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''encoder_hidden_states''': encoder_hidden_states,
'''encoder_attention_mask''': encoder_attention_mask,
}
__a = model(_a )
__a = [input_ids, input_mask]
__a = model(_a , encoder_hidden_states=_a )
# Also check the case where encoder outputs are not passed
__a = model(_a , attention_mask=_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a ):
__a = TFEsmForMaskedLM(config=_a )
__a = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a ):
__a = self.num_labels
__a = TFEsmForTokenClassification(config=_a )
__a = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__a = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCAmelCase ( self ):
__a = self.prepare_config_and_inputs()
(
(
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) ,
) = config_and_inputs
__a = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : int = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
__UpperCAmelCase : Tuple = (
{
'feature-extraction': TFEsmModel,
'fill-mask': TFEsmForMaskedLM,
'text-classification': TFEsmForSequenceClassification,
'token-classification': TFEsmForTokenClassification,
'zero-shot': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : Union[str, Any] = False
def __UpperCAmelCase ( self ):
__a = TFEsmModelTester(self )
__a = ConfigTester(self , config_class=_a , hidden_size=37 )
def __UpperCAmelCase ( self ):
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_a )
@slow
def __UpperCAmelCase ( self ):
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = TFEsmModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@unittest.skip('''Protein models do not support embedding resizing.''' )
def __UpperCAmelCase ( self ):
pass
@unittest.skip('''Protein models do not support embedding resizing.''' )
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(_a )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
__a = model.get_bias()
assert isinstance(_a , _a )
for k, v in name.items():
assert isinstance(_a , tf.Variable )
else:
__a = model.get_output_embeddings()
assert x is None
__a = model.get_bias()
assert name is None
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self ):
__a = TFEsmForMaskedLM.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
__a = tf.constant([[0, 1, 2, 3, 4, 5]] )
__a = model(_a )[0]
__a = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , _a )
# compare the actual values for a slice.
__a = tf.constant(
[
[
[8.92_1518, -10.58_9814, -6.467_1307],
[-6.396_7156, -13.91_1377, -1.121_1915],
[-7.78_1247, -13.95_1557, -3.74_0592],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) )
@slow
def __UpperCAmelCase ( self ):
__a = TFEsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
__a = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
__a = model(_a )[0]
# compare the actual values for a slice.
__a = tf.constant(
[
[
[0.1444_3092, 0.5412_5327, 0.324_7739],
[0.3034_0484, 0.0052_6676, 0.3107_7722],
[0.3227_8043, -0.2498_7096, 0.341_4628],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 45 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__UpperCamelCase : Dict = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE( a_ ):
_UpperCAmelCase = ["pixel_values"]
def __init__( self: List[Any] , UpperCamelCase: bool = True , UpperCamelCase: Optional[Dict[str, int]] = None , UpperCamelCase: PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase: bool = True , UpperCamelCase: Dict[str, int] = None , UpperCamelCase: bool = True , UpperCamelCase: Union[int, float] = 1 / 2_55 , UpperCamelCase: bool = True , UpperCamelCase: Optional[Union[float, List[float]]] = None , UpperCamelCase: Optional[Union[float, List[float]]] = None , **UpperCamelCase: Optional[int] , ) -> None:
super().__init__(**UpperCamelCase )
snake_case__ = size if size is not None else {'shortest_edge': 2_56}
snake_case__ = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase )
snake_case__ = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
snake_case__ = get_size_dict(UpperCamelCase )
snake_case__ = do_resize
snake_case__ = size
snake_case__ = resample
snake_case__ = do_center_crop
snake_case__ = crop_size
snake_case__ = do_rescale
snake_case__ = rescale_factor
snake_case__ = do_normalize
snake_case__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
snake_case__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: np.ndarray , UpperCamelCase: Dict[str, int] , UpperCamelCase: PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase: Dict , ) -> np.ndarray:
snake_case__ = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
snake_case__ = get_resize_output_image_size(UpperCamelCase , size=size['shortest_edge'] , default_to_square=UpperCamelCase )
return resize(UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase )
def lowerCAmelCase_ ( self: List[Any] , UpperCamelCase: np.ndarray , UpperCamelCase: Dict[str, int] , UpperCamelCase: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase: List[Any] , ) -> np.ndarray:
snake_case__ = get_size_dict(UpperCamelCase )
return center_crop(UpperCamelCase , size=(size['height'], size['width']) , data_format=UpperCamelCase , **UpperCamelCase )
def lowerCAmelCase_ ( self: Union[str, Any] , UpperCamelCase: np.ndarray , UpperCamelCase: float , UpperCamelCase: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase: Dict ) -> np.ndarray:
return rescale(UpperCamelCase , scale=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase )
def lowerCAmelCase_ ( self: Optional[Any] , UpperCamelCase: np.ndarray , UpperCamelCase: Union[float, List[float]] , UpperCamelCase: Union[float, List[float]] , UpperCamelCase: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase: Any , ) -> np.ndarray:
return normalize(UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase )
def lowerCAmelCase_ ( self: Any , UpperCamelCase: ImageInput , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Dict[str, int] = None , UpperCamelCase: PILImageResampling = None , UpperCamelCase: bool = None , UpperCamelCase: Dict[str, int] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[float] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[Union[float, List[float]]] = None , UpperCamelCase: Optional[Union[float, List[float]]] = None , UpperCamelCase: Optional[Union[str, TensorType]] = None , UpperCamelCase: Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase: Any , ) -> Optional[Any]:
snake_case__ = do_resize if do_resize is not None else self.do_resize
snake_case__ = size if size is not None else self.size
snake_case__ = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase )
snake_case__ = resample if resample is not None else self.resample
snake_case__ = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case__ = crop_size if crop_size is not None else self.crop_size
snake_case__ = get_size_dict(UpperCamelCase )
snake_case__ = do_rescale if do_rescale is not None else self.do_rescale
snake_case__ = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case__ = do_normalize if do_normalize is not None else self.do_normalize
snake_case__ = image_mean if image_mean is not None else self.image_mean
snake_case__ = image_std if image_std is not None else self.image_std
snake_case__ = make_list_of_images(UpperCamelCase )
if not valid_images(UpperCamelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
snake_case__ = [to_numpy_array(UpperCamelCase ) for image in images]
if do_resize:
snake_case__ = [self.resize(image=UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase ) for image in images]
if do_center_crop:
snake_case__ = [self.center_crop(image=UpperCamelCase , size=UpperCamelCase ) for image in images]
if do_rescale:
snake_case__ = [self.rescale(image=UpperCamelCase , scale=UpperCamelCase ) for image in images]
if do_normalize:
snake_case__ = [self.normalize(image=UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase ) for image in images]
snake_case__ = [to_channel_dimension_format(UpperCamelCase , UpperCamelCase ) for image in images]
snake_case__ = {'pixel_values': images}
return BatchFeature(data=UpperCamelCase , tensor_type=UpperCamelCase )
| 307 | 0 |
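# A numpy-only sketch of the two numeric steps the processor above applies
# after resizing and cropping: rescale pixels by 1/255, then normalize with a
# per-channel mean and std. The 0.5/0.5 constants mirror the "standard"
# ImageNet values the row imports; treat the exact numbers as an assumption.
import numpy as np

MEAN = np.array([0.5, 0.5, 0.5])
STD = np.array([0.5, 0.5, 0.5])

def rescale_and_normalize(image):
    # image: HWC uint8 array -> float array in roughly [-1, 1]
    x = image.astype(np.float32) * (1 / 255)
    return (x - MEAN) / STD

out = rescale_and_normalize(np.zeros((224, 224, 3), dtype=np.uint8))
assert out.shape == (224, 224, 3) and out.min() == -1.0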
"""simple docstring"""
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_lowerCamelCase : List[str] = 16
_lowerCamelCase : Any = 32
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ = 16 ) -> Optional[Any]:
"""simple docstring"""
A__ = AutoTokenizer.from_pretrained('''bert-base-cased''' )
A__ = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(lowercase_ ):
# max_length=None => use the model max length (it's actually the default)
A__ = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=lowercase_ , max_length=lowercase_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
A__ = datasets.map(
lowercase_ , batched=lowercase_ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A__ = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(lowercase_ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
A__ = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
A__ = 16
elif accelerator.mixed_precision != "no":
A__ = 8
else:
A__ = None
return tokenizer.pad(
lowercase_ , padding='''longest''' , max_length=lowercase_ , pad_to_multiple_of=lowercase_ , return_tensors='''pt''' , )
# Instantiate dataloaders.
A__ = DataLoader(
tokenized_datasets['''train'''] , shuffle=lowercase_ , collate_fn=lowercase_ , batch_size=lowercase_ , drop_last=lowercase_ )
A__ = DataLoader(
tokenized_datasets['''validation'''] , shuffle=lowercase_ , collate_fn=lowercase_ , batch_size=lowercase_ , drop_last=(accelerator.mixed_precision == '''fp8''') , )
return train_dataloader, eval_dataloader
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> int:
"""simple docstring"""
A__ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A__ = config['''lr''']
A__ = int(config['''num_epochs'''] )
A__ = int(config['''seed'''] )
A__ = int(config['''batch_size'''] )
A__ = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
A__ = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
A__ = batch_size // MAX_GPU_BATCH_SIZE
A__ = MAX_GPU_BATCH_SIZE
set_seed(lowercase_ )
A__ , A__ = get_dataloaders(lowercase_ , lowercase_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A__ = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=lowercase_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
A__ = model.to(accelerator.device )
# Instantiate optimizer
A__ = AdamW(params=model.parameters() , lr=lowercase_ )
# Instantiate scheduler
A__ = get_linear_schedule_with_warmup(
optimizer=lowercase_ , num_warmup_steps=100 , num_training_steps=(len(lowercase_ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A__ , A__ , A__ , A__ , A__ = accelerator.prepare(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# Now we train the model
for epoch in range(lowercase_ ):
model.train()
for step, batch in enumerate(lowercase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
A__ = model(**lowercase_ )
A__ = outputs.loss
A__ = loss / gradient_accumulation_steps
accelerator.backward(lowercase_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowercase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
A__ = model(**lowercase_ )
A__ = outputs.logits.argmax(dim=-1 )
A__ , A__ = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=lowercase_ , references=lowercase_ , )
A__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , lowercase_ )
def SCREAMING_SNAKE_CASE ( ) -> Dict:
"""simple docstring"""
A__ = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=lowercase_ , default=lowercase_ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose '''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
A__ = parser.parse_args()
A__ = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(lowercase_ , lowercase_ )
if __name__ == "__main__":
main()
| 353 |
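# The gradient-accumulation bookkeeping used by the training loop above,
# isolated: when the requested batch exceeds what fits on one device, split it
# into micro-batches and divide each loss by the number of steps. A pure-Python
# sketch with illustrative numbers.
MAX_GPU_BATCH_SIZE = 16

def accumulation_plan(batch_size):
    # Return (micro_batch_size, gradient_accumulation_steps).
    if batch_size > MAX_GPU_BATCH_SIZE:
        return MAX_GPU_BATCH_SIZE, batch_size // MAX_GPU_BATCH_SIZE
    return batch_size, 1

assert accumulation_plan(64) == (16, 4)  # 4 micro-batches of 16, loss / 4
assert accumulation_plan(16) == (16, 1)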
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : float) ->float:
'''simple docstring'''
return 0.0
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> tuple[int | float, int | float]:
"""simple docstring"""
A__ = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
A__ = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
return lowest, highest
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> None:
"""simple docstring"""
A__ = 512
A__ = [1] + [0] * (size - 1)
A__ = [filter_type.process(lowercase_ ) for item in inputs]
A__ = [0] * (samplerate - size) # zero-padding
outputs += filler
A__ = np.abs(np.fft.fft(lowercase_ ) )
A__ = 20 * np.logaa(lowercase_ )
# Frequencies on log scale from 24 to Nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel('''Frequency (Hz)''' )
plt.xscale('''log''' )
# Display within reasonable bounds
A__ = get_bounds(lowercase_ , lowercase_ )
plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
plt.ylabel('''Gain (dB)''' )
plt.plot(lowercase_ )
plt.show()
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> None:
"""simple docstring"""
A__ = 512
A__ = [1] + [0] * (size - 1)
A__ = [filter_type.process(lowercase_ ) for item in inputs]
A__ = [0] * (samplerate - size) # zero-padding
outputs += filler
A__ = np.angle(np.fft.fft(lowercase_ ) )
# Frequencies on log scale from 24 to Nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel('''Frequency (Hz)''' )
plt.xscale('''log''' )
plt.ylim(-2 * pi , 2 * pi )
plt.ylabel('''Phase shift (Radians)''' )
plt.plot(np.unwrap(lowercase_ , -2 * pi ) )
plt.show()
| 231 | 0 |
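# The measurement trick the plotting helpers above rely on, in isolation: feed
# the filter a unit impulse, zero-pad, FFT the response, and convert magnitude
# to decibels. `process` is any per-sample filter callable; the identity stub
# used here is illustrative.
import numpy as np

def frequency_response_db(process, samplerate=48_000, size=512):
    impulse = [1.0] + [0.0] * (size - 1)
    outputs = [process(x) for x in impulse]
    outputs += [0.0] * (samplerate - size)  # zero-pad to samplerate FFT bins
    return 20 * np.log10(np.abs(np.fft.fft(outputs)) + 1e-12)  # dB, guarded at 0

gains = frequency_response_db(lambda x: x)  # an all-pass: ~0 dB everywhere
assert abs(gains[100]) < 1e-6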
"""simple docstring"""
def lowercase_ ( _snake_case ,_snake_case ):
if not (isinstance(_snake_case ,_snake_case ) and isinstance(_snake_case ,_snake_case )):
raise ValueError("""longest_common_substring() takes two strings for inputs""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = len(_snake_case )
SCREAMING_SNAKE_CASE__ : int = len(_snake_case )
SCREAMING_SNAKE_CASE__ : Dict = [[0] * (texta_length + 1) for _ in range(texta_length + 1 )]
SCREAMING_SNAKE_CASE__ : List[Any] = 0
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0
for i in range(1 ,texta_length + 1 ):
for j in range(1 ,texta_length + 1 ):
if texta[i - 1] == texta[j - 1]:
SCREAMING_SNAKE_CASE__ : int = 1 + dp[i - 1][j - 1]
if dp[i][j] > ans_length:
SCREAMING_SNAKE_CASE__ : List[Any] = i
SCREAMING_SNAKE_CASE__ : List[str] = dp[i][j]
return texta[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 25 |
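# The same dynamic program as above with transparent names: dp[i][j] is the
# length of the common suffix of text1[:i] and text2[:j], and the answer is the
# best suffix seen anywhere. An illustrative sketch.
def lcs_substring(text1, text2):
    dp = [[0] * (len(text2) + 1) for _ in range(len(text1) + 1)]
    best_len, best_end = 0, 0
    for i in range(1, len(text1) + 1):
        for j in range(1, len(text2) + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = dp[i - 1][j - 1] + 1
                if dp[i][j] > best_len:
                    best_len, best_end = dp[i][j], i
    return text1[best_end - best_len : best_end]

assert lcs_substring("abcdxyz", "xyzabcd") == "abcd"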
"""simple docstring"""
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class __lowercase :
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
if dst_width < 0 or dst_height < 0:
raise ValueError('''Destination width/height should be > 0''' )
__a : Optional[int] = img
__a : Any = img.shape[1]
__a : Optional[int] = img.shape[0]
__a : Tuple = dst_width
__a : List[Any] = dst_height
__a : Optional[int] = self.src_w / self.dst_w
__a : Tuple = self.src_h / self.dst_h
__a : Union[str, Any] = (
np.ones((self.dst_h, self.dst_w, 3) , np.uinta ) * 255
)
def _lowerCamelCase ( self ):
for i in range(self.dst_h ):
for j in range(self.dst_w ):
__a : Optional[int] = self.img[self.get_y(_UpperCAmelCase )][self.get_x(_UpperCAmelCase )]
def _lowerCamelCase ( self , _UpperCAmelCase ):
return int(self.ratio_x * x )
def _lowerCamelCase ( self , _UpperCAmelCase ):
return int(self.ratio_y * y )
if __name__ == "__main__":
A , A = 800, 600
A = imread('''image_data/lena.jpg''', 1)
A = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F'Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}', n.output
)
waitKey(0)
destroyAllWindows()
| 160 | 0 |
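# The index mapping that drives the NearestNeighbour class above, as a compact
# numpy sketch: each destination pixel samples the source pixel at the floor of
# the scaled coordinate. The 4x4 test image is illustrative.
import numpy as np

def resize_nearest(img, dst_h, dst_w):
    src_h, src_w = img.shape[:2]
    ys = (np.arange(dst_h) * (src_h / dst_h)).astype(int)
    xs = (np.arange(dst_w) * (src_w / dst_w)).astype(int)
    return img[ys[:, None], xs[None, :]]  # broadcasted fancy indexing

img = np.arange(16, dtype=np.uint8).reshape(4, 4)
assert resize_nearest(img, 2, 2).tolist() == [[0, 2], [8, 10]]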
"""simple docstring"""
from __future__ import annotations
import queue
class lowerCAmelCase :
'''simple docstring'''
def __init__( self , lowerCAmelCase__ ) -> Optional[Any]:
SCREAMING_SNAKE_CASE = data
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
def lowercase () -> TreeNode:
print('\n********Press N to stop entering at any point of time********\n' )
SCREAMING_SNAKE_CASE = input('Enter the value of the root node: ' ).strip().lower()
SCREAMING_SNAKE_CASE = queue.Queue()
SCREAMING_SNAKE_CASE = TreeNode(int(SCREAMING_SNAKE_CASE_ ) )
q.put(SCREAMING_SNAKE_CASE_ )
while not q.empty():
SCREAMING_SNAKE_CASE = q.get()
SCREAMING_SNAKE_CASE = F'Enter the left node of {node_found.data}: '
SCREAMING_SNAKE_CASE = input(SCREAMING_SNAKE_CASE_ ).strip().lower() or 'n'
if check == "n":
return tree_node
SCREAMING_SNAKE_CASE = TreeNode(int(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE = left_node
q.put(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = F'Enter the right node of {node_found.data}: '
SCREAMING_SNAKE_CASE = input(SCREAMING_SNAKE_CASE_ ).strip().lower() or 'n'
if check == "n":
return tree_node
SCREAMING_SNAKE_CASE = TreeNode(int(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE = right_node
q.put(SCREAMING_SNAKE_CASE_ )
raise
def lowercase (SCREAMING_SNAKE_CASE_ : TreeNode ) -> None:
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or not node:
return
print(node.data , end=',' )
pre_order(node.left )
pre_order(node.right )
def lowercase (SCREAMING_SNAKE_CASE_ : TreeNode ) -> None:
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or not node:
return
in_order(node.left )
print(node.data , end=',' )
in_order(node.right )
def lowercase (SCREAMING_SNAKE_CASE_ : TreeNode ) -> None:
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=',' )
def lowercase (SCREAMING_SNAKE_CASE_ : TreeNode ) -> None:
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or not node:
return
SCREAMING_SNAKE_CASE = queue.Queue()
q.put(SCREAMING_SNAKE_CASE_ )
while not q.empty():
SCREAMING_SNAKE_CASE = q.get()
print(node_dequeued.data , end=',' )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def lowercase (SCREAMING_SNAKE_CASE_ : TreeNode ) -> None:
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or not node:
return
SCREAMING_SNAKE_CASE = queue.Queue()
q.put(SCREAMING_SNAKE_CASE_ )
while not q.empty():
SCREAMING_SNAKE_CASE = []
while not q.empty():
SCREAMING_SNAKE_CASE = q.get()
print(node_dequeued.data , end=',' )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(SCREAMING_SNAKE_CASE_ )
def lowercase (SCREAMING_SNAKE_CASE_ : TreeNode ) -> None:
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or not node:
return
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=',' )
stack.append(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = n.left
# end of while means current node doesn't have left child
SCREAMING_SNAKE_CASE = stack.pop()
# start to traverse its right child
SCREAMING_SNAKE_CASE = n.right
def lowercase (SCREAMING_SNAKE_CASE_ : TreeNode ) -> None:
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or not node:
return
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = node
while n or stack:
while n:
stack.append(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = n.left
SCREAMING_SNAKE_CASE = stack.pop()
print(n.data , end=',' )
SCREAMING_SNAKE_CASE = n.right
def lowercase (SCREAMING_SNAKE_CASE_ : TreeNode ) -> None:
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or not node:
return
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = [], []
SCREAMING_SNAKE_CASE = node
stacka.append(SCREAMING_SNAKE_CASE_ )
while stacka: # to find the reversed order of post order, store it in stack2
SCREAMING_SNAKE_CASE = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(SCREAMING_SNAKE_CASE_ )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end=',' )
def lowercase (SCREAMING_SNAKE_CASE_ : str = "" , SCREAMING_SNAKE_CASE_ : int=50 , SCREAMING_SNAKE_CASE_ : Tuple="*" ) -> str:
if not s:
return "\n" + width * char
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = divmod(width - len(SCREAMING_SNAKE_CASE_ ) - 2 , 2 )
return F'{left * char} {s} {(left + extra) * char}'
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('''Binary Tree Traversals'''))
__UpperCamelCase = build_tree()
print(prompt('''Pre Order Traversal'''))
pre_order(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal'''))
in_order(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal'''))
post_order(node)
print(prompt() + '''\n''')
print(prompt('''Level Order Traversal'''))
level_order(node)
print(prompt() + '''\n''')
print(prompt('''Actual Level Order Traversal'''))
level_order_actual(node)
print('''*''' * 50 + '''\n''')
print(prompt('''Pre Order Traversal - Iteration Version'''))
pre_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal - Iteration Version'''))
in_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal - Iteration Version'''))
post_order_iter(node)
print(prompt())
| 366 |
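# One traversal from the block above in clean form: iterative in-order with an
# explicit stack, the same push-left / pop / go-right loop as `in_order_iter`.
# An illustrative sketch over a three-node tree.
class Node:
    def __init__(self, data, left=None, right=None):
        self.data, self.left, self.right = data, left, right

def in_order_iterative(root):
    stack, out, n = [], [], root
    while n or stack:
        while n:  # descend to the leftmost unvisited node
            stack.append(n)
            n = n.left
        n = stack.pop()  # visit it...
        out.append(n.data)
        n = n.right  # ...then traverse its right subtree
    return out

assert in_order_iterative(Node(2, Node(1), Node(3))) == [1, 2, 3]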
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {'''vocab_file''': '''vocab.json'''}
__UpperCamelCase = {
'''vocab_file''': {
'''mgp-str''': '''https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json''',
}
}
__UpperCamelCase = {'''mgp-str''': 27}
class lowerCAmelCase ( lowerCamelCase_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__="[GO]" , lowerCAmelCase__="[GO]" , lowerCAmelCase__="[s]" , lowerCAmelCase__="[GO]" , **lowerCAmelCase__ ) -> int:
super().__init__(
unk_token=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , **lowerCAmelCase__ , )
with open(lowerCAmelCase__ , encoding='utf-8' ) as vocab_handle:
SCREAMING_SNAKE_CASE = json.load(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = {v: k for k, v in self.vocab.items()}
@property
def __A ( self ) -> List[str]:
return len(self.vocab )
def __A ( self ) -> str:
return dict(self.vocab , **self.added_tokens_encoder )
def __A ( self , lowerCAmelCase__ ) -> Tuple:
SCREAMING_SNAKE_CASE = []
for s in text:
char_tokens.extend(lowerCAmelCase__ )
return char_tokens
def __A ( self , lowerCAmelCase__ ) -> int:
return self.vocab.get(lowerCAmelCase__ , self.vocab.get(self.unk_token ) )
def __A ( self , lowerCAmelCase__ ) -> int:
return self.decoder.get(lowerCAmelCase__ )
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase__ ):
logger.error('Vocabulary path ({}) should be a directory'.format(lowerCAmelCase__ ) )
return
SCREAMING_SNAKE_CASE = os.path.join(
lowerCAmelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
with open(lowerCAmelCase__ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__ ) + '\n' )
return (vocab_file,)
| 38 | 0 |
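# The character-level vocabulary lookup the tokenizer above implements, reduced
# to its essentials: split text into characters and map each through a vocab
# with an unknown-token fallback. The vocab contents here are illustrative.
class CharTokenizer:
    def __init__(self, vocab, unk_token="[GO]"):
        self.vocab = vocab
        self.decoder = {v: k for k, v in vocab.items()}
        self.unk_id = vocab[unk_token]

    def encode(self, text):
        return [self.vocab.get(ch, self.unk_id) for ch in text]

    def decode(self, ids):
        return "".join(self.decoder[i] for i in ids)

tok = CharTokenizer({"[GO]": 0, "a": 1, "b": 2})
assert tok.encode("ab?") == [1, 2, 0]
assert tok.decode([1, 2]) == "ab"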
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class a__ :
def __init__( self : List[str] , a : Optional[int] , a : Any=13 , a : Optional[int]=7 , a : Optional[Any]=False , a : Tuple=True , a : int=False , a : str=False , a : List[Any]=19 , a : Union[str, Any]=32 , a : Optional[int]=5 , a : Tuple=4 , a : Optional[int]=37 , a : int="gelu" , a : Optional[int]=0.1 , a : Union[str, Any]=0.1 , a : List[Any]=5_12 , a : Dict=16 , a : List[str]=2 , a : Optional[Any]=0.02 , a : int=3 , a : Optional[Any]=4 , a : int=None , ):
"""simple docstring"""
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_input_mask
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = initializer_range
__lowerCamelCase = num_labels
__lowerCamelCase = num_choices
__lowerCamelCase = scope
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_input_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCamelCase = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
__lowerCamelCase = EsmConfig(
vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=a , esmfold_config={'''trunk''': {'''num_blocks''': 2}, '''fp16_esm''': False} , )
return config
def SCREAMING_SNAKE_CASE__ ( self : Any , a : Any , a : Dict , a : str , a : List[Any] , a : Union[str, Any] , a : int ):
"""simple docstring"""
__lowerCamelCase = EsmForProteinFolding(config=a ).float()
model.to(a )
model.eval()
__lowerCamelCase = model(a , attention_mask=a )
__lowerCamelCase = model(a )
__lowerCamelCase = model(a )
self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) )
self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = self.prepare_config_and_inputs()
(
(
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) ,
) = config_and_inputs
__lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class a__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
lowerCamelCase : List[Any] =False
lowerCamelCase : List[Any] =(EsmForProteinFolding,) if is_torch_available() else ()
lowerCamelCase : Tuple =()
lowerCamelCase : Any ={} if is_torch_available() else {}
lowerCamelCase : Any =False
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
__lowerCamelCase = EsmFoldModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=a , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
@unittest.skip('''Does not support attention outputs''' )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
pass
@unittest.skip
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not support passing input embeds!''' )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not output hidden states in the normal way.''' )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip('''ESMfold does not output hidden states in the normal way.''' )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
pass
@unittest.skip('''ESMFold only has one output format.''' )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
pass
@unittest.skip('''This test doesn\'t work for ESMFold and doesn\'t test core functionality''' )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not support input chunking.''' )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip('''ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.''' )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
pass
@unittest.skip('''ESMFold doesn\'t support data parallel.''' )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
pass
@require_torch
class a__ ( UpperCAmelCase__ ):
@slow
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
        model = EsmForProteinFolding.from_pretrained('''facebook/esmfold_v1''' ).float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
        position_outputs = model(input_ids )['''positions''']
        expected_slice = torch.tensor([2.58_28, 0.79_93, -10.93_34] , dtype=torch.float32 )
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , expected_slice , atol=1e-4 ) )
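        # ``positions`` holds the predicted atom coordinates; the slice above compares one
        # atom's (x, y, z) against reference values.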
| 67 | '''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
    def create_and_check_openai_gpt_model( self , config , input_ids , head_mask , token_type_ids , *args ):
        """simple docstring"""
        model = OpenAIGPTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , head_mask=head_mask )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_lm_head_model( self , config , input_ids , head_mask , token_type_ids , *args ):
        """simple docstring"""
        model = OpenAIGPTLMHeadModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , labels=input_ids )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_double_lm_head_model( self , config , input_ids , head_mask , token_type_ids , *args ):
        """simple docstring"""
        model = OpenAIGPTDoubleHeadsModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , labels=input_ids )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_openai_gpt_for_sequence_classification( self , config , input_ids , head_mask , token_type_ids , *args ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        result = model(input_ids , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''token_type_ids''': token_type_ids,
            '''head_mask''': head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
lowerCamelCase : List[str] =(
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
lowerCamelCase : str =(
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
lowerCamelCase : Optional[int] =(
{
"feature-extraction": OpenAIGPTModel,
"text-classification": OpenAIGPTForSequenceClassification,
"text-generation": OpenAIGPTLMHeadModel,
"zero-shot": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        """simple docstring"""
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True
        return False
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        """simple docstring"""
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict['''labels'''] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=torch_device , )
                inputs_dict['''input_ids'''] = inputs_dict['''labels''']
                inputs_dict['''token_type_ids'''] = inputs_dict['''labels''']
                inputs_dict['''mc_token_ids'''] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=torch_device , )
                inputs_dict['''mc_labels'''] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp( self ):
        """simple docstring"""
        self.model_tester = OpenAIGPTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=OpenAIGPTConfig , n_embd=37 )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_openai_gpt_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs )
    def test_openai_gpt_lm_head_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs )
    def test_openai_gpt_double_lm_head_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs )
    def test_openai_gpt_classification_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class OPENAIGPTModelLanguageGenerationTest( unittest.TestCase ):
@slow
    def test_lm_generate_openai_gpt( self ):
        """simple docstring"""
        model = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
        model.to(torch_device )
        input_ids = torch.tensor([[4_81, 47_35, 5_44]] , dtype=torch.long , device=torch_device )  # the president is
        expected_output_ids = [
4_81,
47_35,
5_44,
2_46,
9_63,
8_70,
7_62,
2_39,
2_44,
4_04_77,
2_44,
2_49,
7_19,
8_81,
4_87,
5_44,
2_40,
2_44,
6_03,
4_81,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].tolist() , expected_output_ids )
| 67 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester( unittest.TestCase ):
    '''simple docstring'''
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        size = size if size is not None else {'shortest_edge': 18}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
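# The dict above mirrors LevitImageProcessor's constructor arguments; the tests below
# rebuild image processors from it.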
@require_torch
@require_vision
class LevitImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    '''simple docstring'''
    image_processing_class = LevitImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = LevitImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , 'image_mean' ) )
        self.assertTrue(hasattr(image_processing , 'image_std' ) )
        self.assertTrue(hasattr(image_processing , 'do_normalize' ) )
        self.assertTrue(hasattr(image_processing , 'do_resize' ) )
        self.assertTrue(hasattr(image_processing , 'do_center_crop' ) )
        self.assertTrue(hasattr(image_processing , 'size' ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'shortest_edge': 18} )
        self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {'shortest_edge': 42} )
        self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
    def test_batch_feature( self ):
        pass
    def test_call_pil( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
    def test_call_numpy( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
    def test_call_pytorch( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , ) | 210 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester :
    '''simple docstring'''
    def __init__( self , parent , batch_size=2 , is_training=True , use_auxiliary_loss=False , num_queries=10 , num_channels=3 , min_size=32 * 8 , max_size=32 * 8 , num_labels=4 , hidden_dim=64 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            torch_device )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size] , device=torch_device )
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=torch_device ) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels) , device=torch_device ) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config( self ):
        config = MaskaFormerConfig(
            hidden_size=self.hidden_dim , )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config
    def prepare_config_and_inputs_for_common( self ):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state( self , output , config ):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(encoder_hidden_states ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(pixel_decoder_hidden_states ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(transformer_decoder_hidden_states ) , config.decoder_layers )
    def create_and_check_maskaformer_model( self , config , pixel_values , pixel_mask , output_hidden_states=False ):
        with torch.no_grad():
            model = MaskaFormerModel(config=config )
            model.to(torch_device )
            model.eval()
            output = model(pixel_values=pixel_values , pixel_mask=pixel_mask )
            output = model(pixel_values , output_hidden_states=True )
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )
        if output_hidden_states:
            self.check_output_hidden_state(output , config )
    def create_and_check_maskaformer_instance_segmentation_head_model( self , config , pixel_values , pixel_mask , mask_labels , class_labels ):
        model = MaskaFormerForUniversalSegmentation(config=config )
        model.to(torch_device )
        model.eval()
        def comm_check_on_output(result ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
        with torch.no_grad():
            result = model(pixel_values=pixel_values , pixel_mask=pixel_mask )
            result = model(pixel_values )
            comm_check_on_output(result )
            result = model(
                pixel_values=pixel_values , pixel_mask=pixel_mask , mask_labels=mask_labels , class_labels=class_labels )
            comm_check_on_output(result )
        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class MaskaFormerModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    def setUp( self ):
        self.model_tester = MaskaFormerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MaskaFormerConfig , has_text_modality=False )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_maskaformer_model( self ):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config , **inputs , output_hidden_states=False )
    def test_maskaformer_instance_segmentation_head_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*config_and_inputs )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def lowerCAmelCase_ ( self : Optional[int] ):
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def lowerCAmelCase_ ( self : Tuple ):
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def lowerCAmelCase_ ( self : List[Any] ):
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def lowerCAmelCase_ ( self : Tuple ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def lowerCAmelCase_ ( self : Any ):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCAmelCase_ ( self : int ):
pass
    def test_forward_signature( self ):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
@slow
    def test_model_from_pretrained( self ):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = MaskaFormerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_model_with_labels( self ):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            'pixel_values': torch.randn((2, 3, *size) , device=torch_device ),
            'mask_labels': torch.randn((2, 10, *size) , device=torch_device ),
            'class_labels': torch.zeros(2 , 10 , device=torch_device ).long(),
        }
        config = self.model_tester.get_config()
        model = MaskaFormerForUniversalSegmentation(config ).to(torch_device )
        outputs = model(**inputs )
        self.assertTrue(outputs.loss is not None )
    def test_hidden_states_output( self ):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config , **inputs , output_hidden_states=True )
    def test_attention_outputs( self ):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config ).to(torch_device )
            outputs = model(**inputs , output_attentions=True )
            self.assertTrue(outputs.attentions is not None )
    def test_training( self ):
        if not self.model_tester.is_training:
            return
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        model = model_class(config )
        model.to(torch_device )
        model.train()
        loss = model(pixel_values , mask_labels=mask_labels , class_labels=class_labels ).loss
        loss.backward()
    def test_retain_grad_hidden_states_attentions( self ):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True
        model = model_class(config ).to(torch_device )
        model.train()
        outputs = model(pixel_values , mask_labels=mask_labels , class_labels=class_labels )
        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        attentions = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=True )
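        # retain_grad() keeps gradients on these non-leaf activations so the
        # assertions below can check that backprop actually reached them.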
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
TOLERANCE = 1E-4
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_vision
@slow
class MaskaFormerModelIntegrationTest( unittest.TestCase ):
    '''simple docstring'''
    @cached_property
    def model_checkpoints( self ):
        return "facebook/mask2former-swin-small-coco-instance"
    @cached_property
    def default_image_processor( self ):
        return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
    def test_inference_no_head( self ):
        model = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image , return_tensors='pt' ).to(torch_device )
        inputs_shape = inputs['pixel_values'].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(inputs_shape , (1, 3, 384, 384) )
        with torch.no_grad():
            outputs = model(**inputs )
        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(torch_device )
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , expected_slice_hidden_state , atol=TOLERANCE ) )
        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(torch_device )
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , expected_slice_hidden_state , atol=TOLERANCE ) )
        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(torch_device )
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , expected_slice_hidden_state , atol=TOLERANCE ) )
    def test_inference_universal_segmentation_head( self ):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(torch_device ).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image , return_tensors='pt' ).to(torch_device )
        inputs_shape = inputs['pixel_values'].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(inputs_shape , (1, 3, 384, 384) )
        with torch.no_grad():
            outputs = model(**inputs )
        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice ).to(torch_device )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , expected_slice , atol=TOLERANCE ) )
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , expected_slice , atol=TOLERANCE ) )
    def test_with_segmentation_maps_and_loss( self ):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(torch_device ).eval()
        image_processor = self.default_image_processor
        inputs = image_processor(
            [np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.float32 ), np.zeros((384, 384) ).astype(np.float32 )] , return_tensors='pt' , )
        inputs['pixel_values'] = inputs['pixel_values'].to(torch_device )
        inputs['mask_labels'] = [el.to(torch_device ) for el in inputs['mask_labels']]
        inputs['class_labels'] = [el.to(torch_device ) for el in inputs['class_labels']]
        with torch.no_grad():
            outputs = model(**inputs )
        self.assertTrue(outputs.loss is not None ) | 210 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
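# VQModel below is a VQ-VAE-style autoencoder: Encoder -> vector quantizer -> Decoder.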
@dataclass
class VQEncoderOutput( BaseOutput ):
    """simple docstring"""
    latents: torch.FloatTensor
class VQModel( ModelMixin , ConfigMixin ):
    """simple docstring"""
@register_to_config
    def __init__( self , in_channels = 3 , out_channels = 3 , down_block_types = ("DownEncoderBlock2D",) , up_block_types = ("UpDecoderBlock2D",) , block_out_channels = (64,) , layers_per_block = 1 , act_fn = "silu" , latent_channels = 3 , sample_size = 32 , num_vq_embeddings = 256 , norm_num_groups = 32 , vq_embed_dim = None , scaling_factor = 0.1_82_15 , norm_type = "group" , ):
        """simple docstring"""
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels , out_channels=latent_channels , down_block_types=down_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , double_z=False , )
        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels , vq_embed_dim , 1 )
        self.quantize = VectorQuantizer(num_vq_embeddings , vq_embed_dim , beta=0.25 , remap=None , sane_index_shape=False )
        self.post_quant_conv = nn.Conv2d(vq_embed_dim , latent_channels , 1 )
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels , out_channels=out_channels , up_block_types=up_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , norm_type=norm_type , )
    @apply_forward_hook
    def encode( self , x , return_dict = True ):
        """simple docstring"""
        h = self.encoder(x )
        h = self.quant_conv(h )
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h )
    @apply_forward_hook
    def decode( self , h , force_not_quantize = False , return_dict = True ):
        """simple docstring"""
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h )
        else:
            quant = h
        quant2 = self.post_quant_conv(quant )
        dec = self.decoder(quant2 , quant if self.config.norm_type == 'spatial' else None )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
    def forward( self , sample , return_dict = True ):
        """simple docstring"""
        x = sample
        h = self.encode(x ).latents
        dec = self.decode(h ).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
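# Minimal usage sketch (hypothetical shapes, assuming the defaults above):
#   model = VQModel()
#   sample = torch.randn(1, 3, 32, 32)
#   reconstruction = model(sample).sample  # encode -> quantize -> decode round trip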
| 39 |
class TrieNode :
    """simple docstring"""
    def __init__( self ):
        """simple docstring"""
        self.nodes = {}  # Mapping from char to TrieNode
        self.is_leaf = False
    def insert_many( self , words ):
        """simple docstring"""
        for word in words:
            self.insert(word )
    def insert( self , word ):
        """simple docstring"""
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True
    def find( self , word ):
        """simple docstring"""
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf
    def delete( self , word ):
        """simple docstring"""
        def _delete(curr , word , index ) -> bool:
            if index == len(word ):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes ) == 0
            char = word[index]
            char_node = curr.nodes.get(char )
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node , word , index + 1 )
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes ) == 0
            return delete_curr
        _delete(self , word , 0 )
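# Every operation above walks one node per character, so insert/find/delete each run in O(len(word)).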
def print_words(node , word ) -> None:
    """simple docstring"""
    if node.is_leaf:
        print(word , end=' ' )
    for key, value in node.nodes.items():
        print_words(value , word + key )
def test_trie() -> bool:
    """simple docstring"""
    words = 'banana bananas bandana band apple all beast'.split()
    root = TrieNode()
    root.insert_many(words )
    # print_words(root, "")
    assert all(root.find(word ) for word in words )
    assert root.find('banana' )
    assert not root.find('bandanas' )
    assert not root.find('apps' )
    assert root.find('apple' )
    assert root.find('all' )
    root.delete('all' )
    assert not root.find('all' )
    root.delete('banana' )
    assert not root.find('banana' )
    assert root.find('bananas' )
    return True
def print_results(msg , passes ) -> None:
    """simple docstring"""
    print(str(msg ) , 'works!' if passes else 'doesn\'t work :(' )
def pytests() -> None:
    """simple docstring"""
    assert test_trie()
def main() -> None:
    """simple docstring"""
    print_results('Testing trie functionality' , test_trie() )
if __name__ == "__main__":
    main()
| 39 | 1 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name ):
    auxiliary_in_channels = 3_84
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 1_28
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 5_12
    elif "large" in model_name:
        embed_dim = 1_92
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 7_68
    # set label information
    num_labels = 1_50
    repo_id = """huggingface/label-files"""
    filename = """ade20k-id2label.json"""
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    backbone_config = SwinConfig(
        embed_dim=embed_dim , depths=depths , num_heads=num_heads , window_size=window_size , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
    config = UperNetConfig(
        backbone_config=backbone_config , auxiliary_in_channels=auxiliary_in_channels , num_labels=num_labels , id2label=id2label , label2id=label2id , )
    return config
def create_rename_keys(config ):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm1.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm1.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm2.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm2.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((F'''backbone.stages.{i}.downsample.reduction.weight''', F'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((F'''backbone.stages.{i}.downsample.norm.weight''', F'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((F'''backbone.stages.{i}.downsample.norm.bias''', F'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
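# The (source, destination) pairs above translate mmsegmentation parameter names into
# the HF UperNet/Swin naming scheme; rename_key() below applies one pair in place.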
def rename_key(dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def read_in_q_k_v(state_dict , backbone_config ):
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' )
            in_proj_bias = state_dict.pop(F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'''] = in_proj_weight[:dim, :]
            state_dict[F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'''] = in_proj_bias[: dim]
            state_dict[F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'''] = in_proj_weight[
                dim : dim * 2, :
            ]
            state_dict[F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'''] = in_proj_bias[
                dim : dim * 2
            ]
            state_dict[F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'''] = in_proj_weight[
                -dim :, :
            ]
            state_dict[F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'''] = in_proj_bias[-dim :]
            # fmt: on
def correct_unfold_reduction_order(x ):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel , 4 , in_channel // 4 )
    x = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(out_channel , in_channel )
    return x
def reverse_correct_unfold_reduction_order(x ):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel , in_channel // 4 , 4 )
    x = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(out_channel , in_channel )
    return x
def correct_unfold_norm_order(x ):
    in_channel = x.shape[0]
    x = x.reshape(4 , in_channel // 4 )
    x = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(in_channel )
    return x
def reverse_correct_unfold_norm_order(x ):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4 , 4 )
    x = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(in_channel )
    return x
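# Worked example (illustrative): for a length-8 vector, correct_unfold_norm_order
# returns the elements in index order [0, 4, 2, 6, 1, 5, 3, 7] -- it swaps the middle
# two of the four patch-merging groups, then interleaves them.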
def convert_upernet_checkpoint(model_name , pytorch_dump_folder_path , push_to_hub ):
    model_name_to_url = {
        """upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""",
        """upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""",
        """upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""",
        """upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="""cpu""" , file_name=model_name )[
        """state_dict"""
    ]
    for name, param in state_dict.items():
        print(name , param.shape )
    config = get_upernet_config(model_name )
    model = UperNetForSemanticSegmentation(config )
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        if "bn" in key:
            key = key.replace("""bn""" , """batch_norm""" )
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config.backbone_config )
    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value )
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value )
    model.load_state_dict(state_dict )
    # verify on image
    url = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
    image = Image.open(requests.get(url , stream=True ).raw ).convert("""RGB""" )
    processor = SegformerImageProcessor()
    pixel_values = processor(image , return_tensors="""pt""" ).pixel_values
    with torch.no_grad():
        outputs = model(pixel_values )
    logits = outputs.logits
    print(logits.shape )
    print("""First values of logits:""" , logits[0, 0, :3, :3] )
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] )
    print("""Logits:""" , outputs.logits[0, 0, :3, :3] )
    assert torch.allclose(outputs.logits[0, 0, :3, :3] , expected_slice , atol=1e-4 )
    print("""Looks ok!""" )
    if pytorch_dump_folder_path is not None:
        print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F'''Saving processor to {pytorch_dump_folder_path}''' )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(F'''Pushing model and processor for {model_name} to hub''' )
        model.push_to_hub(F'''openmmlab/{model_name}''' )
        processor.push_to_hub(F'''openmmlab/{model_name}''' )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-swin-tiny",
type=str,
choices=[F"""upernet-swin-{size}""" for size in ["tiny", "small", "base", "large"]],
help="Name of the Swin + UperNet model you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
_SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 357 |
'''simple docstring'''
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict ) -> tuple:
    return (data["data"], data["target"])
def xgboost(features: np.ndarray , target: np.ndarray ) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features , target )
    return classifier
def main() -> None:
    iris = load_iris()
    features, targets = data_handling(iris )
    x_train, x_test, y_train, y_test = train_test_split(
        features , targets , test_size=0.25 )
    names = iris["""target_names"""]
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train , y_train )
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier , x_test , y_test , display_labels=names , cmap="""Blues""" , normalize="""true""" , )
    plt.title("""Normalized Confusion Matrix - IRIS Dataset""" )
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
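    # Note: normalize="true" row-normalizes the confusion matrix over the true labels,
    # so each diagonal entry on the plot reads as per-class recall.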
| 3 | 0 |
"""simple docstring"""
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
lowerCamelCase_ = logging.get_logger(__name__)
class BarkProcessor( ProcessorMixin ):
    tokenizer_class = '''AutoTokenizer'''
    attributes = ['''tokenizer''']
    preset_shape = {
        '''semantic_prompt''': 1,
        '''coarse_prompt''': 2,
        '''fine_prompt''': 2,
    }
    def __init__( self , tokenizer , speaker_embeddings=None ):
        """simple docstring"""
        super().__init__(tokenizer )
        self.speaker_embeddings = speaker_embeddings
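    # A voice preset bundles three numpy prompt arrays: a 1-D semantic prompt and
    # 2-D coarse and fine prompts, matching preset_shape above.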
@classmethod
    def from_pretrained( cls , pretrained_processor_name_or_path , speaker_embeddings_dict_path="speaker_embeddings_path.json" , **kwargs ):
        """simple docstring"""
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path , speaker_embeddings_dict_path , subfolder=kwargs.pop('''subfolder''' , None ) , cache_dir=kwargs.pop('''cache_dir''' , None ) , force_download=kwargs.pop('''force_download''' , False ) , proxies=kwargs.pop('''proxies''' , None ) , resume_download=kwargs.pop('''resume_download''' , False ) , local_files_only=kwargs.pop('''local_files_only''' , False ) , use_auth_token=kwargs.pop('''use_auth_token''' , None ) , revision=kwargs.pop('''revision''' , None ) , )
            if speaker_embeddings_path is None:
                logger.warning(
                    f'''`{os.path.join(pretrained_processor_name_or_path , speaker_embeddings_dict_path )}` does not exists
                    , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
                    dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path ) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json )
        else:
            speaker_embeddings = None
        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path , **kwargs )
        return cls(tokenizer=tokenizer , speaker_embeddings=speaker_embeddings )
    def save_pretrained( self , save_directory , speaker_embeddings_dict_path="speaker_embeddings_path.json" , speaker_embeddings_directory="speaker_embeddings" , push_to_hub = False , **kwargs , ):
        """simple docstring"""
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory , speaker_embeddings_directory , '''v2''' ) , exist_ok=True )
            embeddings_dict = {}
            embeddings_dict['''repo_or_path'''] = save_directory
            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key )
                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict['''repo_or_path'''] , speaker_embeddings_directory , f'''{prompt_key}_{key}''' ) , voice_preset[key] , allow_pickle=False , )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory , f'''{prompt_key}_{key}.npy''' )
                    embeddings_dict[prompt_key] = tmp_dict
            with open(os.path.join(save_directory , speaker_embeddings_dict_path ) , '''w''' ) as fp:
                json.dump(embeddings_dict , fp )
        super().save_pretrained(save_directory , push_to_hub , **kwargs )
    def _load_voice_preset( self , voice_preset = None , **kwargs ):
        """simple docstring"""
        voice_preset_paths = self.speaker_embeddings[voice_preset]
        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' )
            path = get_file_from_repo(
                self.speaker_embeddings.get('''repo_or_path''' , '''/''' ) , voice_preset_paths[key] , subfolder=kwargs.pop('''subfolder''' , None ) , cache_dir=kwargs.pop('''cache_dir''' , None ) , force_download=kwargs.pop('''force_download''' , False ) , proxies=kwargs.pop('''proxies''' , None ) , resume_download=kwargs.pop('''resume_download''' , False ) , local_files_only=kwargs.pop('''local_files_only''' , False ) , use_auth_token=kwargs.pop('''use_auth_token''' , None ) , revision=kwargs.pop('''revision''' , None ) , )
            if path is None:
                raise ValueError(
                    f'''`{os.path.join(self.speaker_embeddings.get("repo_or_path" ,"/" ) ,voice_preset_paths[key] )}` does not exists
                    , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
                    embeddings.''' )
            voice_preset_dict[key] = np.load(path )
        return voice_preset_dict
    def _validate_voice_preset_dict( self , voice_preset = None ):
        """simple docstring"""
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(f'''Voice preset unrecognized, missing {key} as a key.''' )
if not isinstance(voice_preset[key] ,np.ndarray ):
raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
def __call__( self ,SCREAMING_SNAKE_CASE__=None ,SCREAMING_SNAKE_CASE__=None ,SCREAMING_SNAKE_CASE__="pt" ,SCREAMING_SNAKE_CASE__=2_56 ,SCREAMING_SNAKE_CASE__=False ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=False ,**SCREAMING_SNAKE_CASE__ ,) -> Dict:
"""simple docstring"""
if voice_preset is not None and not isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
if (
isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
__SCREAMING_SNAKE_CASE :List[Any] = self._load_voice_preset(SCREAMING_SNAKE_CASE__ )
else:
if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) and not voice_preset.endswith('''.npz''' ):
__SCREAMING_SNAKE_CASE :Dict = voice_preset + '''.npz'''
__SCREAMING_SNAKE_CASE :int = np.load(SCREAMING_SNAKE_CASE__ )
if voice_preset is not None:
self._validate_voice_preset_dict(SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Any = BatchFeature(data=SCREAMING_SNAKE_CASE__ ,tensor_type=SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :int = self.tokenizer(
SCREAMING_SNAKE_CASE__ ,return_tensors=SCREAMING_SNAKE_CASE__ ,padding='''max_length''' ,max_length=SCREAMING_SNAKE_CASE__ ,return_attention_mask=SCREAMING_SNAKE_CASE__ ,return_token_type_ids=SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,)
if voice_preset is not None:
__SCREAMING_SNAKE_CASE :Dict = voice_preset
        return encoded_text
 | 191 |
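A minimal usage sketch for the voice-preset processor above, assuming it deobfuscates to transformers' BarkProcessor (its __call__ signature and voice-preset handling match); the checkpoint and preset names are illustrative:

from transformers import BarkProcessor

# Hypothetical checkpoint/preset names; any Bark checkpoint shipping speaker
# embeddings would follow the same flow.
processor = BarkProcessor.from_pretrained("suno/bark-small")
inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
print(inputs["input_ids"].shape)        # text padded to max_length (256 by default above)
print(inputs["history_prompt"].keys())  # semantic/coarse/fine prompt arrays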
"""simple docstring"""
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class _SCREAMING_SNAKE_CASE( A ):
SCREAMING_SNAKE_CASE_ : int = '''new-model'''
if is_tf_available():
class _SCREAMING_SNAKE_CASE( A ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = NewModelConfig
@require_tf
class _SCREAMING_SNAKE_CASE( unittest.TestCase ):
@slow
def _UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Tuple = '''bert-base-cased'''
__SCREAMING_SNAKE_CASE :int = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :List[str] = TFAutoModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
@slow
def _UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Tuple = '''bert-base-cased'''
__SCREAMING_SNAKE_CASE :List[str] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :str = TFAutoModelForPreTraining.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
@slow
def _UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE :List[str] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Optional[int] = TFAutoModelForCausalLM.from_pretrained(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Dict = TFAutoModelForCausalLM.from_pretrained(SCREAMING_SNAKE_CASE__ ,output_loading_info=SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
@slow
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE :List[str] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Any = TFAutoModelWithLMHead.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
@slow
def _UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE :Union[str, Any] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Tuple = TFAutoModelForMaskedLM.from_pretrained(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Tuple = TFAutoModelForMaskedLM.from_pretrained(SCREAMING_SNAKE_CASE__ ,output_loading_info=SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
@slow
def _UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE :Optional[Any] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :str = TFAutoModelForSeqaSeqLM.from_pretrained(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Optional[int] = TFAutoModelForSeqaSeqLM.from_pretrained(SCREAMING_SNAKE_CASE__ ,output_loading_info=SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
@slow
def _UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
__SCREAMING_SNAKE_CASE :Optional[int] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :List[str] = TFAutoModelForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
@slow
def _UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
__SCREAMING_SNAKE_CASE :Optional[int] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Optional[Any] = TFAutoModelForQuestionAnswering.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
@slow
@require_tensorflow_probability
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
__SCREAMING_SNAKE_CASE :int = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Union[str, Any] = TFAutoModelForTableQuestionAnswering.from_pretrained(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Optional[int] = TFAutoModelForTableQuestionAnswering.from_pretrained(
SCREAMING_SNAKE_CASE__ ,output_loading_info=SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
        __SCREAMING_SNAKE_CASE :int = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
self.assertEqual(model.num_parameters() ,1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=SCREAMING_SNAKE_CASE__ ) ,1_44_10 )
def _UpperCamelCase ( self ) -> str:
"""simple docstring"""
        __SCREAMING_SNAKE_CASE :Optional[Any] = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
self.assertEqual(model.num_parameters() ,1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=SCREAMING_SNAKE_CASE__ ) ,1_44_10 )
def _UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Optional[Any] = TFAutoModel.from_pretrained('''sgugger/funnel-random-tiny''' )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :str = copy.deepcopy(model.config )
__SCREAMING_SNAKE_CASE :List[str] = ['''FunnelBaseModel''']
__SCREAMING_SNAKE_CASE :int = TFAutoModel.from_config(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Union[str, Any] = TFAutoModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
try:
AutoConfig.register('''new-model''' ,SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Tuple = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
auto_class.register(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
auto_class.register(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
auto_class.register(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
# Now that the config is registered, it can be used as any other config with the auto-API
__SCREAMING_SNAKE_CASE :Any = BertModelTester(self ).get_config()
__SCREAMING_SNAKE_CASE :Dict = NewModelConfig(**tiny_config.to_dict() )
__SCREAMING_SNAKE_CASE :Union[str, Any] = auto_class.from_config(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :str = auto_class.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
with self.assertRaisesRegex(
            EnvironmentError ,'''bert-base is not a local folder and is not a valid model identifier''' ):
__SCREAMING_SNAKE_CASE :int = TFAutoModel.from_pretrained('''bert-base''' )
def _UpperCamelCase ( self ) -> Any:
"""simple docstring"""
with self.assertRaisesRegex(
            EnvironmentError ,R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
            __SCREAMING_SNAKE_CASE :Union[str, Any] = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER ,revision='''aaaaaa''' )
def _UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
with self.assertRaisesRegex(
            EnvironmentError ,'''hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin''' ,):
__SCREAMING_SNAKE_CASE :Optional[Any] = TFAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' )
def _UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
        with self.assertRaisesRegex(EnvironmentError ,'''Use `from_pt=True` to load this model''' ):
__SCREAMING_SNAKE_CASE :List[str] = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )
def _UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :int = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
__SCREAMING_SNAKE_CASE :int = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count ,0 )
self.assertEqual(counter.head_request_count ,1 )
self.assertEqual(counter.other_request_count ,0 )
# With a sharded checkpoint
__SCREAMING_SNAKE_CASE :Dict = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
with RequestCounter() as counter:
__SCREAMING_SNAKE_CASE :Optional[Any] = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
self.assertEqual(counter.get_request_count ,0 )
self.assertEqual(counter.head_request_count ,1 )
self.assertEqual(counter.other_request_count ,0 ) | 191 | 1 |
'''simple docstring'''
from torch import nn
def __magic_name__ ( UpperCamelCase_ ) -> int:
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(F'''Unsupported activation function: {act_fn}''' )
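# A short usage sketch for the mapper above; the obfuscated name __magic_name__ is
# kept so this runs against the code exactly as written:
act = __magic_name__("silu")  # "swish" and "silu" both map to nn.SiLU()
print(act)                    # SiLU()
try:
    __magic_name__("tanh")    # not handled by the mapping above
except ValueError as err:
    print(err)                # Unsupported activation function: tanh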
| 353 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase_ = {
"configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
"processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 332 | 0 |
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def __init__( self , *_A , **_A ) -> None:
warnings.warn(
'''The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use DPTImageProcessor instead.''' , FutureWarning , )
super().__init__(*_A , **_A )
| 299 |
from __future__ import annotations
__UpperCAmelCase = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, ):
SCREAMING_SNAKE_CASE_ = [
[0 for col in range(len(grid[0] ) )] for row in range(len(__lowerCamelCase ) )
] # the reference grid
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = [
[0 for col in range(len(grid[0] ) )] for row in range(len(__lowerCamelCase ) )
] # the action grid
SCREAMING_SNAKE_CASE_ = init[0]
SCREAMING_SNAKE_CASE_ = init[1]
SCREAMING_SNAKE_CASE_ = 0
    SCREAMING_SNAKE_CASE_ = g + heuristic[x][y] # f = g + h: cost so far plus the heuristic estimate to the goal
SCREAMING_SNAKE_CASE_ = [[f, g, x, y]]
SCREAMING_SNAKE_CASE_ = False # flag that is set when search is complete
SCREAMING_SNAKE_CASE_ = False # flag set if we can't find expand
while not found and not resign:
if len(__lowerCamelCase ) == 0:
raise ValueError('''Algorithm is unable to find solution''' )
        else: # to choose the least costly action so as to move closer to the goal
cell.sort()
cell.reverse()
SCREAMING_SNAKE_CASE_ = cell.pop()
SCREAMING_SNAKE_CASE_ = next_cell[2]
SCREAMING_SNAKE_CASE_ = next_cell[3]
SCREAMING_SNAKE_CASE_ = next_cell[1]
if x == goal[0] and y == goal[1]:
SCREAMING_SNAKE_CASE_ = True
else:
for i in range(len(__lowerCamelCase ) ): # to try out different valid actions
SCREAMING_SNAKE_CASE_ = x + DIRECTIONS[i][0]
SCREAMING_SNAKE_CASE_ = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(__lowerCamelCase ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
SCREAMING_SNAKE_CASE_ = g + cost
SCREAMING_SNAKE_CASE_ = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = i
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = goal[0]
SCREAMING_SNAKE_CASE_ = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
SCREAMING_SNAKE_CASE_ = x - DIRECTIONS[action[x][y]][0]
SCREAMING_SNAKE_CASE_ = y - DIRECTIONS[action[x][y]][1]
SCREAMING_SNAKE_CASE_ = xa
SCREAMING_SNAKE_CASE_ = ya
invpath.append([x, y] )
SCREAMING_SNAKE_CASE_ = []
for i in range(len(__lowerCamelCase ) ):
path.append(invpath[len(__lowerCamelCase ) - 1 - i] )
return path, action
if __name__ == "__main__":
__UpperCAmelCase = [
[0, 1, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0], # 0s are free cells whereas 1s are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
__UpperCAmelCase = [0, 0]
# all coordinates are given in format [y,x]
__UpperCAmelCase = [len(grid) - 1, len(grid[0]) - 1]
__UpperCAmelCase = 1
# the cost map which pushes the path closer to the goal
__UpperCAmelCase = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
__UpperCAmelCase = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
__UpperCAmelCase = 99
__UpperCAmelCase , __UpperCAmelCase = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 299 | 1 |
"""simple docstring"""
from __future__ import annotations
def UpperCAmelCase ( a_, a_, a_, a_, a_, ):
'''simple docstring'''
lowerCamelCase : Optional[int] = len(a_ )
    # If row is equal to the size of the board it means there is a queen in each row
    # of the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append(['. ' * i + 'Q ' + '. ' * (n - 1 - i) for i in possible_board] )
return
    # We iterate over each column in the row to find all valid queen placements for this row
for col in range(a_ ):
        # We apply what we learned previously. First we check that the current board
        # (possible_board) does not already contain this column value, because a
        # repeat would mean a vertical collision. Then we apply the two formulas we
        # learned before:
        #
        # 45º: y - x = b or 45º: row - col = b
        # 135º: y + x = b or 135º: row + col = b.
        #
        # And we verify that the results of these two formulas do not already appear
        # in their respective variables (diagonal_right_collisions,
        # diagonal_left_collisions).
        #
        # If any of these checks matches, there is a collision, so we continue to the
        # next value in the for loop.
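        # Concrete check: queens at (row=0, col=1) and (row=1, col=2) share a 45º
        # diagonal since 0 - 1 == 1 - 2 == -1, while queens at (0, 2) and (1, 1)
        # share a 135º diagonal since 0 + 2 == 1 + 1 == 2.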
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
        # Otherwise we call depth_first_search again with the updated inputs
depth_first_search(
[*possible_board, col], [*diagonal_right_collisions, row - col], [*diagonal_left_collisions, row + col], a_, a_, )
def UpperCAmelCase ( a_ ):
'''simple docstring'''
lowerCamelCase : list[list[str]] = []
depth_first_search([], [], [], a_, a_ )
# Print all the boards
for board in boards:
for column in board:
print(a_ )
print('' )
print(len(a_ ), 'solutions were found.' )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
| 363 |
"""simple docstring"""
def UpperCAmelCase ( a_, a_ ):
'''simple docstring'''
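    # Iterative Euclid: each pass replaces (a, b) with (b, a % b); the gcd is
    # unchanged by this step, and b reaches 0 in finitely many steps, leaving
    # the gcd in a.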
while b:
lowerCamelCase , lowerCamelCase : Tuple = b, a % b
return a
def UpperCAmelCase ( a_, a_ ):
'''simple docstring'''
return a if b == 0 else euclidean_gcd_recursive(a_, a % b )
def UpperCAmelCase ( ):
'''simple docstring'''
print(F"""euclidean_gcd(3, 5) = {euclidean_gcd(3, 5 )}""" )
print(F"""euclidean_gcd(5, 3) = {euclidean_gcd(5, 3 )}""" )
print(F"""euclidean_gcd(1, 3) = {euclidean_gcd(1, 3 )}""" )
print(F"""euclidean_gcd(3, 6) = {euclidean_gcd(3, 6 )}""" )
print(F"""euclidean_gcd(6, 3) = {euclidean_gcd(6, 3 )}""" )
print(F"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5 )}""" )
print(F"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3 )}""" )
print(F"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3 )}""" )
print(F"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6 )}""" )
print(F"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3 )}""" )
if __name__ == "__main__":
main()
| 205 | 0 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = TypeVar("DatasetType", Dataset, IterableDataset)
def A__ ( __lowerCamelCase, __lowerCamelCase = None, __lowerCamelCase = None, __lowerCamelCase = None, __lowerCamelCase = None, __lowerCamelCase = "first_exhausted", ):
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('''Unable to interleave an empty list of datasets.''' )
for i, dataset in enumerate(__lowerCamelCase ):
if not isinstance(__lowerCamelCase, (Dataset, IterableDataset) ):
if isinstance(__lowerCamelCase, (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '''
'''is an empty dataset dictionary.''' )
raise ValueError(
F'''Dataset at position {i} has at least one split: {list(__lowerCamelCase )}\n'''
F'''Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(__lowerCamelCase ) )}\']''' )
raise ValueError(
F'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__lowerCamelCase ).__name__}.''' )
if i == 0:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = (
(Dataset, IterableDataset) if isinstance(__lowerCamelCase, __lowerCamelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(__lowerCamelCase, __lowerCamelCase ):
raise ValueError(
F'''Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.''' )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F'''{stopping_strategy} is not supported. Please enter a valid stopping_strategy.''' )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, info=__lowerCamelCase, split=__lowerCamelCase, stopping_strategy=__lowerCamelCase )
else:
return _interleave_iterable_datasets(
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, info=__lowerCamelCase, split=__lowerCamelCase, stopping_strategy=__lowerCamelCase )
def A__ ( __lowerCamelCase, __lowerCamelCase = None, __lowerCamelCase = None, __lowerCamelCase = 0, ):
if not dsets:
raise ValueError('''Unable to concatenate an empty list of datasets.''' )
for i, dataset in enumerate(__lowerCamelCase ):
if not isinstance(__lowerCamelCase, (Dataset, IterableDataset) ):
if isinstance(__lowerCamelCase, (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '''
'''is an empty dataset dictionary.''' )
raise ValueError(
F'''Dataset at position {i} has at least one split: {list(__lowerCamelCase )}\n'''
                F'''Please pick one to concatenate with the other datasets, for example: dataset[\'{next(iter(__lowerCamelCase ) )}\']''' )
raise ValueError(
F'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__lowerCamelCase ).__name__}.''' )
if i == 0:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = (
(Dataset, IterableDataset) if isinstance(__lowerCamelCase, __lowerCamelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(__lowerCamelCase, __lowerCamelCase ):
raise ValueError(
                F'''Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.''' )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(__lowerCamelCase, info=__lowerCamelCase, split=__lowerCamelCase, axis=__lowerCamelCase )
else:
return _concatenate_iterable_datasets(__lowerCamelCase, info=__lowerCamelCase, split=__lowerCamelCase, axis=__lowerCamelCase )
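# Hedged usage sketch: the two helpers above match 🤗 Datasets' public
# interleave_datasets / concatenate_datasets, so the same flow can be exercised
# through those names (the toy rows are illustrative):
if __name__ == "__main__":
    from datasets import Dataset, concatenate_datasets, interleave_datasets

    ds_a = Dataset.from_dict({"text": ["a1", "a2", "a3"]})
    ds_b = Dataset.from_dict({"text": ["b1", "b2"]})
    # Round-robin over the rows; "first_exhausted" stops once the smaller dataset runs out.
    mixed = interleave_datasets([ds_a, ds_b], stopping_strategy="first_exhausted")
    print(mixed["text"])   # ['a1', 'b1', 'a2', 'b2']
    # Stack all rows end to end (axis=0, the default above).
    joined = concatenate_datasets([ds_a, ds_b])
    print(joined["text"])  # ['a1', 'a2', 'a3', 'b1', 'b2']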
| 299 |
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
__UpperCAmelCase = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def A__ ( __lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = test_results.split(''' ''' )
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
SCREAMING_SNAKE_CASE_ = expressions[-2] if '''=''' in expressions[-1] else expressions[-1]
for i, expression in enumerate(__lowerCamelCase ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
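# Example of the parsing above: for a pytest summary such as
# "= 2 failed, 130 passed in 124.32s =" the token right before each
# "failed"/"passed" marker holds the count, so this helper returns
# failed=2, success=130 and time_spent="124.32s".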
def A__ ( __lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = False
for line in failures_short_lines.split('''\n''' ):
if re.search(r'''_ \[doctest\]''', __lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = line.split(''' ''' )[2]
elif in_error and not line.split(''' ''' )[0].isdigit():
SCREAMING_SNAKE_CASE_ = line
SCREAMING_SNAKE_CASE_ = False
return failures
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self , _A , _A ) -> Dict:
SCREAMING_SNAKE_CASE_ = title
SCREAMING_SNAKE_CASE_ = doc_test_results['''time_spent'''].split(''',''' )[0]
SCREAMING_SNAKE_CASE_ = doc_test_results['''success''']
SCREAMING_SNAKE_CASE_ = doc_test_results['''failures''']
SCREAMING_SNAKE_CASE_ = self.n_success + self.n_failures
# Failures and success of the modeling tests
SCREAMING_SNAKE_CASE_ = doc_test_results
@property
def _UpperCamelCase ( self ) -> str:
SCREAMING_SNAKE_CASE_ = [self._time_spent]
SCREAMING_SNAKE_CASE_ = 0
for time in time_spent:
SCREAMING_SNAKE_CASE_ = time.split(''':''' )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(_A ) == 1:
SCREAMING_SNAKE_CASE_ = [0, 0, time_parts[0]]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 3600 + minutes * 60 + seconds
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
return F'''{int(_A )}h{int(_A )}m{int(_A )}s'''
@property
def _UpperCamelCase ( self ) -> Dict:
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def _UpperCamelCase ( self ) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F'''🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.''',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
@property
def _UpperCamelCase ( self ) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'''
F''' {self.time}.'''
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
@property
def _UpperCamelCase ( self ) -> Dict:
SCREAMING_SNAKE_CASE_ = 40
SCREAMING_SNAKE_CASE_ = {k: v['''failed'''] for k, v in doc_test_results.items() if isinstance(_A , _A )}
SCREAMING_SNAKE_CASE_ = ''''''
for category, failures in category_failures.items():
if len(_A ) == 0:
continue
if report != "":
report += "\n\n"
report += F'''*{category} failures*:'''.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(_A )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F'''The following examples had failures:\n\n\n{report}\n''',
},
}
@property
def _UpperCamelCase ( self ) -> str:
SCREAMING_SNAKE_CASE_ = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(_A )
@staticmethod
def _UpperCamelCase ( ) -> Any:
SCREAMING_SNAKE_CASE_ = [
{
'''type''': '''section''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''There was an issue running the tests.''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True},
'''url''': F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
]
print('''Sending the following payload''' )
print(json.dumps({'''blocks''': json.loads(_A )} ) )
client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , text='''There was an issue running the tests.''' , blocks=_A , )
def _UpperCamelCase ( self ) -> Optional[int]:
print('''Sending the following payload''' )
print(json.dumps({'''blocks''': json.loads(self.payload )} ) )
SCREAMING_SNAKE_CASE_ = F'''{self.n_failures} failures out of {self.n_tests} tests,''' if self.n_failures else '''All tests passed.'''
SCREAMING_SNAKE_CASE_ = client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , blocks=self.payload , text=_A , )
def _UpperCamelCase ( self , _A , _A , _A , _A ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = ''''''
for key, value in failures.items():
SCREAMING_SNAKE_CASE_ = value[:200] + ''' [Truncated]''' if len(_A ) > 250 else value
failures_text += F'''*{key}*\n_{value}_\n\n'''
SCREAMING_SNAKE_CASE_ = job_name
SCREAMING_SNAKE_CASE_ = {'''type''': '''section''', '''text''': {'''type''': '''mrkdwn''', '''text''': text}}
if job_link is not None:
SCREAMING_SNAKE_CASE_ = {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''GitHub Action job''', '''emoji''': True},
'''url''': job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def _UpperCamelCase ( self ) -> int:
if self.thread_ts is None:
raise ValueError('''Can only post reply if a post has been made.''' )
SCREAMING_SNAKE_CASE_ = self.doc_test_results.pop('''job_link''' )
self.doc_test_results.pop('''failures''' )
self.doc_test_results.pop('''success''' )
self.doc_test_results.pop('''time_spent''' )
SCREAMING_SNAKE_CASE_ = sorted(self.doc_test_results.items() , key=lambda _A : t[0] )
for job, job_result in sorted_dict:
if len(job_result['''failures'''] ):
SCREAMING_SNAKE_CASE_ = F'''*Num failures* :{len(job_result["failed"] )} \n'''
SCREAMING_SNAKE_CASE_ = job_result['''failures''']
SCREAMING_SNAKE_CASE_ = self.get_reply_blocks(_A , _A , _A , text=_A )
print('''Sending the following reply''' )
print(json.dumps({'''blocks''': blocks} ) )
client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , text=F'''Results for {job}''' , blocks=_A , thread_ts=self.thread_ts['''ts'''] , )
time.sleep(1 )
def A__ ( ):
SCREAMING_SNAKE_CASE_ = os.environ['''GITHUB_RUN_ID''']
SCREAMING_SNAKE_CASE_ = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'''
SCREAMING_SNAKE_CASE_ = requests.get(__lowerCamelCase ).json()
SCREAMING_SNAKE_CASE_ = {}
try:
jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
SCREAMING_SNAKE_CASE_ = math.ceil((result['''total_count'''] - 1_00) / 1_00 )
for i in range(__lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = requests.get(url + F'''&page={i + 2}''' ).json()
jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
return jobs
except Exception as e:
print('''Unknown error, could not fetch links.''', __lowerCamelCase )
return {}
def A__ ( __lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = {}
if os.path.exists(__lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = os.listdir(__lowerCamelCase )
for file in files:
try:
with open(os.path.join(__lowerCamelCase, __lowerCamelCase ), encoding='''utf-8''' ) as f:
SCREAMING_SNAKE_CASE_ = f.read()
except UnicodeDecodeError as e:
raise ValueError(F'''Could not open {os.path.join(__lowerCamelCase, __lowerCamelCase )}.''' ) from e
return _artifact
def A__ ( ):
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self , _A ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = name
SCREAMING_SNAKE_CASE_ = []
def __str__( self ) -> int:
return self.name
def _UpperCamelCase ( self , _A ) -> Tuple:
self.paths.append({'''name''': self.name, '''path''': path} )
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = filter(os.path.isdir, os.listdir() )
for directory in directories:
SCREAMING_SNAKE_CASE_ = directory
if artifact_name not in _available_artifacts:
SCREAMING_SNAKE_CASE_ = Artifact(__lowerCamelCase )
_available_artifacts[artifact_name].add_path(__lowerCamelCase )
return _available_artifacts
if __name__ == "__main__":
__UpperCAmelCase = get_job_links()
__UpperCAmelCase = retrieve_available_artifacts()
__UpperCAmelCase = collections.OrderedDict(
[
("*.py", "API Examples"),
("*.md", "MD Examples"),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
__UpperCAmelCase = {
v: {
"failed": [],
"failures": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
__UpperCAmelCase = github_actions_job_links.get("run_doctests")
__UpperCAmelCase = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
__UpperCAmelCase = retrieve_artifact(artifact_path["name"])
if "stats" in artifact:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = handle_test_results(artifact["stats"])
__UpperCAmelCase = failed
__UpperCAmelCase = success
__UpperCAmelCase = time_spent[1:-1] + ", "
__UpperCAmelCase = extract_first_line_failure(artifact["failures_short"])
for line in artifact["summary_short"].split("\n"):
if re.search("FAILED", line):
__UpperCAmelCase = line.replace("FAILED ", "")
__UpperCAmelCase = line.split()[0].replace("\n", "")
if "::" in line:
__UpperCAmelCase , __UpperCAmelCase = line.split("::")
else:
__UpperCAmelCase , __UpperCAmelCase = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
__UpperCAmelCase = docs[file_regex]
doc_test_results[category]["failed"].append(test)
__UpperCAmelCase = all_failures[test] if test in all_failures else "N/A"
__UpperCAmelCase = failure
break
__UpperCAmelCase = Message("🤗 Results of the doc tests.", doc_test_results)
message.post()
message.post_reply()
| 299 | 1 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = 42
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
def __init__( self,__lowerCamelCase=3,__lowerCamelCase=3,__lowerCamelCase=("DownEncoderBlock2D",),__lowerCamelCase=(64,),__lowerCamelCase=2,__lowerCamelCase=32,__lowerCamelCase="silu",__lowerCamelCase=True,):
super().__init__()
A__ = layers_per_block
A__ = torch.nn.Convad(
__lowerCamelCase,block_out_channels[0],kernel_size=3,stride=1,padding=1,)
A__ = None
A__ = nn.ModuleList([] )
# down
A__ = block_out_channels[0]
for i, down_block_type in enumerate(__lowerCamelCase ):
A__ = output_channel
A__ = block_out_channels[i]
A__ = i == len(__lowerCamelCase ) - 1
A__ = get_down_block(
__lowerCamelCase,num_layers=self.layers_per_block,in_channels=__lowerCamelCase,out_channels=__lowerCamelCase,add_downsample=not is_final_block,resnet_eps=1E-6,downsample_padding=0,resnet_act_fn=__lowerCamelCase,resnet_groups=__lowerCamelCase,attention_head_dim=__lowerCamelCase,temb_channels=__lowerCamelCase,)
self.down_blocks.append(__lowerCamelCase )
# mid
A__ = UNetMidBlockaD(
in_channels=block_out_channels[-1],resnet_eps=1E-6,resnet_act_fn=__lowerCamelCase,output_scale_factor=1,resnet_time_scale_shift='''default''',attention_head_dim=block_out_channels[-1],resnet_groups=__lowerCamelCase,temb_channels=__lowerCamelCase,)
# out
A__ = nn.GroupNorm(num_channels=block_out_channels[-1],num_groups=__lowerCamelCase,eps=1E-6 )
A__ = nn.SiLU()
A__ = 2 * out_channels if double_z else out_channels
A__ = nn.Convad(block_out_channels[-1],__lowerCamelCase,3,padding=1 )
A__ = False
def UpperCamelCase ( self,__lowerCamelCase ):
A__ = x
A__ = self.conv_in(__lowerCamelCase )
if self.training and self.gradient_checkpointing:
def create_custom_forward(__lowerCamelCase ):
def custom_forward(*__lowerCamelCase ):
return module(*__lowerCamelCase )
return custom_forward
# down
if is_torch_version('''>=''','''1.11.0''' ):
for down_block in self.down_blocks:
A__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(__lowerCamelCase ),__lowerCamelCase,use_reentrant=__lowerCamelCase )
# middle
A__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ),__lowerCamelCase,use_reentrant=__lowerCamelCase )
else:
for down_block in self.down_blocks:
A__ = torch.utils.checkpoint.checkpoint(create_custom_forward(__lowerCamelCase ),__lowerCamelCase )
# middle
A__ = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ),__lowerCamelCase )
else:
# down
for down_block in self.down_blocks:
A__ = down_block(__lowerCamelCase )
# middle
A__ = self.mid_block(__lowerCamelCase )
# post-process
A__ = self.conv_norm_out(__lowerCamelCase )
A__ = self.conv_act(__lowerCamelCase )
A__ = self.conv_out(__lowerCamelCase )
return sample
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
def __init__( self,__lowerCamelCase=3,__lowerCamelCase=3,__lowerCamelCase=("UpDecoderBlock2D",),__lowerCamelCase=(64,),__lowerCamelCase=2,__lowerCamelCase=32,__lowerCamelCase="silu",__lowerCamelCase="group",):
super().__init__()
A__ = layers_per_block
A__ = nn.Convad(
__lowerCamelCase,block_out_channels[-1],kernel_size=3,stride=1,padding=1,)
A__ = None
A__ = nn.ModuleList([] )
A__ = in_channels if norm_type == '''spatial''' else None
# mid
A__ = UNetMidBlockaD(
in_channels=block_out_channels[-1],resnet_eps=1E-6,resnet_act_fn=__lowerCamelCase,output_scale_factor=1,resnet_time_scale_shift='''default''' if norm_type == '''group''' else norm_type,attention_head_dim=block_out_channels[-1],resnet_groups=__lowerCamelCase,temb_channels=__lowerCamelCase,)
# up
A__ = list(reversed(__lowerCamelCase ) )
A__ = reversed_block_out_channels[0]
for i, up_block_type in enumerate(__lowerCamelCase ):
A__ = output_channel
A__ = reversed_block_out_channels[i]
A__ = i == len(__lowerCamelCase ) - 1
A__ = get_up_block(
__lowerCamelCase,num_layers=self.layers_per_block + 1,in_channels=__lowerCamelCase,out_channels=__lowerCamelCase,prev_output_channel=__lowerCamelCase,add_upsample=not is_final_block,resnet_eps=1E-6,resnet_act_fn=__lowerCamelCase,resnet_groups=__lowerCamelCase,attention_head_dim=__lowerCamelCase,temb_channels=__lowerCamelCase,resnet_time_scale_shift=__lowerCamelCase,)
self.up_blocks.append(__lowerCamelCase )
A__ = output_channel
# out
if norm_type == "spatial":
A__ = SpatialNorm(block_out_channels[0],__lowerCamelCase )
else:
A__ = nn.GroupNorm(num_channels=block_out_channels[0],num_groups=__lowerCamelCase,eps=1E-6 )
A__ = nn.SiLU()
A__ = nn.Convad(block_out_channels[0],__lowerCamelCase,3,padding=1 )
A__ = False
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase=None ):
A__ = z
A__ = self.conv_in(__lowerCamelCase )
A__ = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(__lowerCamelCase ):
def custom_forward(*__lowerCamelCase ):
return module(*__lowerCamelCase )
return custom_forward
if is_torch_version('''>=''','''1.11.0''' ):
# middle
A__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ),__lowerCamelCase,__lowerCamelCase,use_reentrant=__lowerCamelCase )
A__ = sample.to(__lowerCamelCase )
# up
for up_block in self.up_blocks:
A__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(__lowerCamelCase ),__lowerCamelCase,__lowerCamelCase,use_reentrant=__lowerCamelCase )
else:
# middle
A__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ),__lowerCamelCase,__lowerCamelCase )
A__ = sample.to(__lowerCamelCase )
# up
for up_block in self.up_blocks:
A__ = torch.utils.checkpoint.checkpoint(create_custom_forward(__lowerCamelCase ),__lowerCamelCase,__lowerCamelCase )
else:
# middle
A__ = self.mid_block(__lowerCamelCase,__lowerCamelCase )
A__ = sample.to(__lowerCamelCase )
# up
for up_block in self.up_blocks:
A__ = up_block(__lowerCamelCase,__lowerCamelCase )
# post-process
if latent_embeds is None:
A__ = self.conv_norm_out(__lowerCamelCase )
else:
A__ = self.conv_norm_out(__lowerCamelCase,__lowerCamelCase )
A__ = self.conv_act(__lowerCamelCase )
A__ = self.conv_out(__lowerCamelCase )
return sample
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
def __init__( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase=None,__lowerCamelCase="random",__lowerCamelCase=False,__lowerCamelCase=True ):
super().__init__()
A__ = n_e
A__ = vq_embed_dim
A__ = beta
A__ = legacy
A__ = nn.Embedding(self.n_e,self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e,1.0 / self.n_e )
A__ = remap
if self.remap is not None:
self.register_buffer('''used''',torch.tensor(np.load(self.remap ) ) )
A__ = self.used.shape[0]
A__ = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
A__ = self.re_embed
A__ = self.re_embed + 1
print(
f"Remapping {self.n_e} indices to {self.re_embed} indices. "
f"Using {self.unknown_index} for unknown indices." )
else:
A__ = n_e
A__ = sane_index_shape
def UpperCamelCase ( self,__lowerCamelCase ):
A__ = inds.shape
assert len(__lowerCamelCase ) > 1
A__ = inds.reshape(ishape[0],-1 )
A__ = self.used.to(__lowerCamelCase )
A__ = (inds[:, :, None] == used[None, None, ...]).long()
A__ = match.argmax(-1 )
A__ = match.sum(2 ) < 1
if self.unknown_index == "random":
A__ = torch.randint(0,self.re_embed,size=new[unknown].shape ).to(device=new.device )
else:
A__ = self.unknown_index
return new.reshape(__lowerCamelCase )
def UpperCamelCase ( self,__lowerCamelCase ):
A__ = inds.shape
assert len(__lowerCamelCase ) > 1
A__ = inds.reshape(ishape[0],-1 )
A__ = self.used.to(__lowerCamelCase )
if self.re_embed > self.used.shape[0]: # extra token
A__ = 0 # simply set to zero
A__ = torch.gather(used[None, :][inds.shape[0] * [0], :],1,__lowerCamelCase )
return back.reshape(__lowerCamelCase )
def UpperCamelCase ( self,__lowerCamelCase ):
# reshape z -> (batch, height, width, channel) and flatten
A__ = z.permute(0,2,3,1 ).contiguous()
A__ = z.view(-1,self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
A__ = torch.argmin(torch.cdist(__lowerCamelCase,self.embedding.weight ),dim=1 )
A__ = self.embedding(__lowerCamelCase ).view(z.shape )
A__ = None
A__ = None
# compute loss for embedding
if not self.legacy:
A__ = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
A__ = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
A__ = z + (z_q - z).detach()
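        # (straight-through estimator: the forward pass uses the quantized z_q, while
        # the backward pass routes gradients to z as if quantization were the identity)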
# reshape back to match original input shape
A__ = z_q.permute(0,3,1,2 ).contiguous()
if self.remap is not None:
A__ = min_encoding_indices.reshape(z.shape[0],-1 ) # add batch axis
A__ = self.remap_to_used(__lowerCamelCase )
A__ = min_encoding_indices.reshape(-1,1 ) # flatten
if self.sane_index_shape:
A__ = min_encoding_indices.reshape(z_q.shape[0],z_q.shape[2],z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase ):
# shape specifying (batch, height, width, channel)
if self.remap is not None:
A__ = indices.reshape(shape[0],-1 ) # add batch axis
A__ = self.unmap_to_all(__lowerCamelCase )
A__ = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
A__ = self.embedding(__lowerCamelCase )
if shape is not None:
A__ = z_q.view(__lowerCamelCase )
# reshape back to match original input shape
A__ = z_q.permute(0,3,1,2 ).contiguous()
return z_q
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ):
def __init__( self,__lowerCamelCase,__lowerCamelCase=False ):
A__ = parameters
A__ , A__ = torch.chunk(__lowerCamelCase,2,dim=1 )
A__ = torch.clamp(self.logvar,-30.0,20.0 )
A__ = deterministic
A__ = torch.exp(0.5 * self.logvar )
A__ = torch.exp(self.logvar )
if self.deterministic:
A__ = A__ = torch.zeros_like(
self.mean,device=self.parameters.device,dtype=self.parameters.dtype )
def UpperCamelCase ( self,__lowerCamelCase = None ):
# make sure sample is on the same device as the parameters and has same dtype
A__ = randn_tensor(
self.mean.shape,generator=__lowerCamelCase,device=self.parameters.device,dtype=self.parameters.dtype )
A__ = self.mean + self.std * sample
return x
def UpperCamelCase ( self,__lowerCamelCase=None ):
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean,2 ) + self.var - 1.0 - self.logvar,dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean,2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar,dim=[1, 2, 3],)
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase=[1, 2, 3] ):
if self.deterministic:
return torch.Tensor([0.0] )
A__ = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean,2 ) / self.var,dim=__lowerCamelCase )
def UpperCamelCase ( self ):
return self.mean
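# Standalone sketch of the math in the diagonal-Gaussian distribution class above
# (plain tensors; shapes are illustrative and this is not the class's own API):
import torch

params = torch.randn(2, 8, 4, 4)              # encoder output: [mean | logvar] on dim 1
mean, logvar = torch.chunk(params, 2, dim=1)  # two (2, 4, 4, 4) halves
logvar = torch.clamp(logvar, -30.0, 20.0)     # same numerical guard as above
std = torch.exp(0.5 * logvar)
sample = mean + std * torch.randn_like(mean)  # reparameterization: differentiable in mean/std
# KL to a standard normal, matching the `other is None` branch above
kl = 0.5 * torch.sum(mean.pow(2) + logvar.exp() - 1.0 - logvar, dim=[1, 2, 3])
print(sample.shape, kl.shape)                 # torch.Size([2, 4, 4, 4]) torch.Size([2])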
| 356 |
a__: dict[str, float] = {
"km/h": 1.0,
"m/s": 3.6,
"mph": 1.60_93_44,
"knot": 1.8_52,
}
a__: dict[str, float] = {
"km/h": 1.0,
"m/s": 0.2_77_77_77_78,
"mph": 0.6_21_37_11_92,
"knot": 0.5_39_95_68_03,
}
def UpperCamelCase__( UpperCamelCase__ : float , UpperCamelCase__ : str , UpperCamelCase__ : str )->float:
if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
A__ = (
f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
f"Valid values are: {', '.join(UpperCamelCase__ )}"
)
raise ValueError(UpperCamelCase__ )
return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 )
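# Worked example of the two-step pivot through km/h above: 100 mph -> km/h -> m/s is
# 100 * 1.609344 * 0.27777778 ≈ 44.704, which the helper returns after round(..., 3).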
if __name__ == "__main__":
import doctest
doctest.testmod()
| 39 | 0 |
from functools import reduce
_SCREAMING_SNAKE_CASE : Any = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def UpperCAmelCase_ ( _A = _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
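    # Slide a window of 13 adjacent digits over the 1000-digit string, multiply each
    # window's digits with reduce, and keep the largest product.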
return max(
# mypy cannot properly interpret reduce
        int(reduce(lambda _a , _b : str(int(_a ) * int(_b ) ) , _A[i : i + 13] ) )
for i in range(len(_A ) - 12 ) )
if __name__ == "__main__":
    print(F"{UpperCAmelCase_() = }")
| 314 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = (UnCLIPScheduler,)
def lowercase_ ( self : List[str] , **__lowerCamelCase : int ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = {
'''num_train_timesteps''': 1000,
'''variance_type''': '''fixed_small_log''',
'''clip_sample''': True,
'''clip_sample_range''': 1.0,
'''prediction_type''': '''epsilon''',
}
config.update(**__lowerCamelCase )
return config
def lowercase_ ( self : Dict ) -> Any:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=__lowerCamelCase )
def lowercase_ ( self : str ) -> Union[str, Any]:
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=__lowerCamelCase )
def lowercase_ ( self : List[str] ) -> int:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__lowerCamelCase )
def lowercase_ ( self : Optional[Any] ) -> Tuple:
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=__lowerCamelCase )
def lowercase_ ( self : Union[str, Any] ) -> Dict:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=__lowerCamelCase )
def lowercase_ ( self : int ) -> str:
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=__lowerCamelCase , prev_timestep=__lowerCamelCase )
def lowercase_ ( self : Dict ) -> Dict:
SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ = self.get_scheduler_config(variance_type='''fixed_small_log''' )
SCREAMING_SNAKE_CASE__ = scheduler_class(**__lowerCamelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.00_00e-10 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0549625 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9994987 ) ) < 1e-5
def lowercase_ ( self : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ = self.get_scheduler_config(variance_type='''learned_range''' )
SCREAMING_SNAKE_CASE__ = scheduler_class(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = 0.5
        assert torch.sum(torch.abs(scheduler._get_variance(1 , predicted_variance=__lowerCamelCase ) - -10.1712790 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 , predicted_variance=__lowerCamelCase ) - -5.7998052 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 , predicted_variance=__lowerCamelCase ) - -0.0010011 ) ) < 1e-5
def lowercase_ ( self : Tuple ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ = scheduler_class(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = scheduler.timesteps
SCREAMING_SNAKE_CASE__ = self.dummy_model()
SCREAMING_SNAKE_CASE__ = self.dummy_sample_deter
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
for i, t in enumerate(__lowerCamelCase ):
# 1. predict noise residual
SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase , __lowerCamelCase )
# 2. predict previous mean of sample x_t-1
SCREAMING_SNAKE_CASE__ = scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , generator=__lowerCamelCase ).prev_sample
SCREAMING_SNAKE_CASE__ = pred_prev_sample
SCREAMING_SNAKE_CASE__ = torch.sum(torch.abs(__lowerCamelCase ) )
SCREAMING_SNAKE_CASE__ = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 252.2682495 ) < 1e-2
assert abs(result_mean.item() - 0.3284743 ) < 1e-3
def lowercase_ ( self : Tuple ) -> Dict:
SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(25 )
SCREAMING_SNAKE_CASE__ = scheduler.timesteps
SCREAMING_SNAKE_CASE__ = self.dummy_model()
SCREAMING_SNAKE_CASE__ = self.dummy_sample_deter
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
for i, t in enumerate(__lowerCamelCase ):
# 1. predict noise residual
SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase , __lowerCamelCase )
if i + 1 == timesteps.shape[0]:
SCREAMING_SNAKE_CASE__ = None
else:
SCREAMING_SNAKE_CASE__ = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
SCREAMING_SNAKE_CASE__ = scheduler.step(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , prev_timestep=__lowerCamelCase , generator=__lowerCamelCase ).prev_sample
SCREAMING_SNAKE_CASE__ = pred_prev_sample
SCREAMING_SNAKE_CASE__ = torch.sum(torch.abs(__lowerCamelCase ) )
SCREAMING_SNAKE_CASE__ = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 258.2044983 ) < 1e-2
assert abs(result_mean.item() - 0.3362038 ) < 1e-3
def lowercase_ ( self : int ) -> Tuple:
pass
def lowercase_ ( self : Dict ) -> Union[str, Any]:
pass
| 314 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _A ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : int = KandinskyVaaPriorPipeline
_SCREAMING_SNAKE_CASE : Union[str, Any] = ["prompt"]
_SCREAMING_SNAKE_CASE : Any = ["prompt", "negative_prompt"]
_SCREAMING_SNAKE_CASE : Optional[Any] = [
"num_images_per_prompt",
"generator",
"num_inference_steps",
"latents",
"negative_prompt",
"guidance_scale",
"output_type",
"return_dict",
]
_SCREAMING_SNAKE_CASE : Dict = False
@property
def __A ( self ) -> Any:
'''simple docstring'''
return 32
@property
def __A ( self ) -> List[str]:
'''simple docstring'''
return 32
@property
def __A ( self ) -> List[Any]:
'''simple docstring'''
return self.time_input_dim
@property
def __A ( self ) -> Optional[int]:
'''simple docstring'''
return self.time_input_dim * 4
@property
def __A ( self ) -> Dict:
'''simple docstring'''
return 100
@property
def __A ( self ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase : Any = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
__UpperCAmelCase : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModelWithProjection(__UpperCAmelCase )
@property
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
__UpperCAmelCase : int = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 12,
"""embedding_dim""": self.text_embedder_hidden_size,
"""num_layers""": 1,
}
__UpperCAmelCase : Tuple = PriorTransformer(**__UpperCAmelCase )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
__UpperCAmelCase : Union[str, Any] = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def __A ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
__UpperCAmelCase : List[str] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
__UpperCAmelCase : str = CLIPVisionModelWithProjection(__UpperCAmelCase )
return model
@property
def __A ( self ) -> Any:
'''simple docstring'''
__UpperCAmelCase : List[str] = CLIPImageProcessor(
crop_size=224 , do_center_crop=__UpperCAmelCase , do_normalize=__UpperCAmelCase , do_resize=__UpperCAmelCase , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=224 , )
return image_processor
def __A ( self ) -> int:
'''simple docstring'''
__UpperCAmelCase : int = self.dummy_prior
__UpperCAmelCase : List[str] = self.dummy_image_encoder
__UpperCAmelCase : int = self.dummy_text_encoder
__UpperCAmelCase : Dict = self.dummy_tokenizer
__UpperCAmelCase : Any = self.dummy_image_processor
__UpperCAmelCase : Optional[Any] = UnCLIPScheduler(
variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1_000 , clip_sample=__UpperCAmelCase , clip_sample_range=10.0 , )
__UpperCAmelCase : Tuple = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""scheduler""": scheduler,
"""image_processor""": image_processor,
}
return components
def __A ( self , __UpperCAmelCase , __UpperCAmelCase=0 ) -> int:
'''simple docstring'''
if str(__UpperCAmelCase ).startswith("""mps""" ):
__UpperCAmelCase : Union[str, Any] = torch.manual_seed(__UpperCAmelCase )
else:
__UpperCAmelCase : str = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
__UpperCAmelCase : Any = {
"""prompt""": """horse""",
"""generator""": generator,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def __A ( self ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase : List[Any] = """cpu"""
__UpperCAmelCase : str = self.get_dummy_components()
__UpperCAmelCase : Any = self.pipeline_class(**__UpperCAmelCase )
__UpperCAmelCase : Optional[Any] = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__UpperCAmelCase : Optional[int] = pipe(**self.get_dummy_inputs(__UpperCAmelCase ) )
__UpperCAmelCase : Tuple = output.image_embeds
__UpperCAmelCase : Any = pipe(
**self.get_dummy_inputs(__UpperCAmelCase ) , return_dict=__UpperCAmelCase , )[0]
__UpperCAmelCase : str = image[0, -10:]
__UpperCAmelCase : Dict = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
__UpperCAmelCase : Union[str, Any] = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
__UpperCAmelCase : Dict = torch_device == """cpu"""
__UpperCAmelCase : Optional[Any] = True
__UpperCAmelCase : str = False
self._test_inference_batch_single_identical(
test_max_difference=__UpperCAmelCase , relax_max_difference=__UpperCAmelCase , test_mean_pixel_difference=__UpperCAmelCase , )
@skip_mps
def __A ( self ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = torch_device == """cpu"""
__UpperCAmelCase : int = False
self._test_attention_slicing_forward_pass(
test_max_difference=__UpperCAmelCase , test_mean_pixel_difference=__UpperCAmelCase , )
| 16 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : List[str] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : str = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Dict = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Optional[int] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : List[Any] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Optional[Any] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Tuple = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Tuple = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Any = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : str = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Optional[int] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : List[Any] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Optional[Any] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Optional[int] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Any = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Tuple = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : str = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Dict = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Optional[Any] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> int:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Dict = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> int:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : List[str] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> int:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : List[Any] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Optional[int] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Any = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : List[Any] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Optional[Any] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
| 16 | 1 |
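The block of generated placeholder classes above all implements one small idea: a dummy stands in for a real class so imports succeed, but fails loudly when an optional backend is missing. A minimal sketch using only the standard library (the class name here is hypothetical, chosen for illustration):

import importlib.util


def requires_backends(obj, backends):
    # raise a readable ImportError if any optional backend is not installed
    name = obj if isinstance(obj, str) else type(obj).__name__
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{name} requires the missing backends: {missing}")


class DummySentencePieceTokenizer:
    # importing this module always works; constructing the class does not
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])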
'''simple docstring'''
from __future__ import annotations

import csv

import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    """Scrape the IMDb Top 250 chart into a {title: rating} mapping."""
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    """Write the scraped Top 250 chart to a CSV file."""
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])


if __name__ == "__main__":
    write_movies()
| 272 |
import requests

_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="


def fetch_bbc_news(bbc_news_api_key: str) -> None:
    """Print the current top BBC News headlines from the News API."""
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f"{i}.) {article['title']}")


if __name__ == "__main__":
    fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
| 231 | 0 |
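Both network snippets above call `requests.get` with no timeout and no status check, so a slow or failing endpoint either hangs the script or produces a confusing parse error. A small hardening sketch; the helper name is ours, not from the snippets:

import requests


def fetch_json(url: str, timeout: float = 10.0) -> dict:
    response = requests.get(url, timeout=timeout)  # fail fast instead of hanging
    response.raise_for_status()  # surface HTTP errors before parsing the body
    return response.json()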
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
__UpperCAmelCase : Optional[Any] = logging.getLogger(__name__)
require_version("pytorch_lightning>=1.0.4")
__UpperCAmelCase : List[Any] = {
"base": AutoModel,
"sequence-classification": AutoModelForSequenceClassification,
"question-answering": AutoModelForQuestionAnswering,
"pretraining": AutoModelForPreTraining,
"token-classification": AutoModelForTokenClassification,
"language-modeling": AutoModelWithLMHead,
"summarization": AutoModelForSeqaSeqLM,
"translation": AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
__UpperCAmelCase : int = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
__UpperCAmelCase : Optional[Any] = sorted(arg_to_scheduler.keys())
__UpperCAmelCase : Union[str, Any] = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class __snake_case ( pl.LightningModule ):
'''simple docstring'''
def __init__( self : Dict , A : argparse.Namespace , A : Optional[Any]=None , A : Optional[Any]="base" , A : List[str]=None , A : Union[str, Any]=None , A : List[str]=None , **A : int , ):
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(A )
__snake_case: Tuple = 0
__snake_case: Tuple = Path(self.hparams.output_dir )
__snake_case: Tuple = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
__snake_case: Union[str, Any] = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({"""num_labels""": num_labels} if num_labels is not None else {}) , cache_dir=A , **A , )
else:
__snake_case: PretrainedConfig = config
__snake_case: Any = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
for p in extra_model_params:
if getattr(self.hparams , A , A ):
assert hasattr(self.config , A ), f'''model config doesn\'t have a `{p}` attribute'''
setattr(self.config , A , getattr(self.hparams , A ) )
if tokenizer is None:
__snake_case: Tuple = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=A , )
else:
__snake_case: PreTrainedTokenizer = tokenizer
__snake_case: Union[str, Any] = MODEL_MODES[mode]
if model is None:
__snake_case: List[str] = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool(""".ckpt""" in self.hparams.model_name_or_path ) , config=self.config , cache_dir=A , )
else:
__snake_case: List[Any] = model
def UpperCAmelCase__ ( self : Tuple , *A : int , **A : Optional[int] ):
__snake_case: int = self.model_type.from_pretrained(*A , **A )
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: str = arg_to_scheduler[self.hparams.lr_scheduler]
__snake_case: Optional[Any] = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
__snake_case: str = {"""scheduler""": scheduler, """interval""": """step""", """frequency""": 1}
return scheduler
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: List[str] = self.model
__snake_case: Tuple = ["""bias""", """LayerNorm.weight"""]
__snake_case: Tuple = [
{
"""params""": [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
], # check this named paramters
"""weight_decay""": self.hparams.weight_decay,
},
{
"""params""": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
"""weight_decay""": 0.0,
},
]
if self.hparams.adafactor:
__snake_case: Optional[Any] = Adafactor(
A , lr=self.hparams.learning_rate , scale_parameter=A , relative_step=A )
else:
__snake_case: List[Any] = AdamW(
A , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
__snake_case: List[str] = optimizer
__snake_case: Dict = self.get_lr_scheduler()
return [optimizer], [scheduler]
def UpperCAmelCase__ ( self : Any , A : Tuple , A : int ):
return self.validation_step(A , A )
def UpperCAmelCase__ ( self : List[Any] , A : Tuple ):
return self.validation_end(A )
def UpperCAmelCase__ ( self : str ):
__snake_case: str = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
__snake_case: str = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def UpperCAmelCase__ ( self : str , A : int ):
if stage == "test":
__snake_case: str = len(self.test_dataloader().dataset )
else:
__snake_case: Optional[Any] = self.get_dataloader("""train""" , self.hparams.train_batch_size , shuffle=A )
__snake_case: List[str] = len(self.train_dataloader().dataset )
def UpperCAmelCase__ ( self : str , A : str , A : int , A : bool = False ):
raise NotImplementedError("""You must implement this for your task""" )
def UpperCAmelCase__ ( self : Optional[int] ):
return self.train_loader
def UpperCAmelCase__ ( self : Dict ):
return self.get_dataloader("""dev""" , self.hparams.eval_batch_size , shuffle=A )
def UpperCAmelCase__ ( self : Union[str, Any] ):
return self.get_dataloader("""test""" , self.hparams.eval_batch_size , shuffle=A )
def UpperCAmelCase__ ( self : Dict , A : List[str] ):
return os.path.join(
self.hparams.data_dir , """cached_{}_{}_{}""".format(
A , list(filter(A , self.hparams.model_name_or_path.split("""/""" ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def UpperCAmelCase__ ( self : Tuple , A : Dict[str, Any] ):
__snake_case: Optional[int] = self.output_dir.joinpath("""best_tfmr""" )
__snake_case: Optional[int] = self.step_count
self.model.save_pretrained(A )
self.tokenizer.save_pretrained(A )
@staticmethod
def UpperCAmelCase__ ( A : List[Any] , A : List[str] ):
parser.add_argument(
"""--model_name_or_path""" , default=A , type=A , required=A , help="""Path to pretrained model or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--config_name""" , default="""""" , type=A , help="""Pretrained config name or path if not the same as model_name""" )
parser.add_argument(
"""--tokenizer_name""" , default=A , type=A , help="""Pretrained tokenizer name or path if not the same as model_name""" , )
parser.add_argument(
"""--cache_dir""" , default=str(Path(A ).parent / """test_run""" / """cache""" ) , type=A , help="""Where do you want to store the pre-trained models downloaded from huggingface.co""" , )
parser.add_argument(
"""--encoder_layerdrop""" , type=A , help="""Encoder layer dropout probability (Optional). Goes into model.config""" , )
parser.add_argument(
"""--decoder_layerdrop""" , type=A , help="""Decoder layer dropout probability (Optional). Goes into model.config""" , )
parser.add_argument(
"""--dropout""" , type=A , help="""Dropout probability (Optional). Goes into model.config""" , )
parser.add_argument(
"""--attention_dropout""" , type=A , help="""Attention dropout probability (Optional). Goes into model.config""" , )
parser.add_argument("""--learning_rate""" , default=5E-5 , type=A , help="""The initial learning rate for Adam.""" )
parser.add_argument(
"""--lr_scheduler""" , default="""linear""" , choices=A , metavar=A , type=A , help="""Learning rate scheduler""" , )
parser.add_argument("""--weight_decay""" , default=0.0 , type=A , help="""Weight decay if we apply some.""" )
parser.add_argument("""--adam_epsilon""" , default=1E-8 , type=A , help="""Epsilon for Adam optimizer.""" )
parser.add_argument("""--warmup_steps""" , default=0 , type=A , help="""Linear warmup over warmup_steps.""" )
parser.add_argument("""--num_workers""" , default=4 , type=A , help="""kwarg passed to DataLoader""" )
parser.add_argument("""--num_train_epochs""" , dest="""max_epochs""" , default=3 , type=A )
parser.add_argument("""--train_batch_size""" , default=32 , type=A )
parser.add_argument("""--eval_batch_size""" , default=32 , type=A )
parser.add_argument("""--adafactor""" , action="""store_true""" )
class __snake_case ( pl.Callback ):
'''simple docstring'''
def UpperCAmelCase__ ( self : str , A : List[str] , A : int ):
if (
trainer.is_global_zero and trainer.global_rank == 0
): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class __snake_case ( pl.Callback ):
'''simple docstring'''
def UpperCAmelCase__ ( self : Any , A : int , A : Tuple ):
# print(pl_module.model.rag)
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(A )
class __snake_case ( pl.Callback ):
'''simple docstring'''
def UpperCAmelCase__ ( self : int , A : Tuple , A : Dict ):
__snake_case: Optional[int] = trainer.lr_schedulers[0]["""scheduler"""]
__snake_case: Dict = {f'''lr_group_{i}''': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(A )
def UpperCAmelCase__ ( self : Dict , A : pl.Trainer , A : pl.LightningModule ):
rank_zero_info("""***** Validation results *****""" )
__snake_case: Tuple = trainer.callback_metrics
# Log results
for key in sorted(A ):
if key not in ["log", "progress_bar"]:
rank_zero_info("""{} = {}\n""".format(A , str(metrics[key] ) ) )
def UpperCAmelCase__ ( self : Tuple , A : pl.Trainer , A : pl.LightningModule ):
rank_zero_info("""***** Test results *****""" )
__snake_case: Union[str, Any] = trainer.callback_metrics
# Log and save results to file
__snake_case: Optional[int] = os.path.join(pl_module.hparams.output_dir , """test_results.txt""" )
with open(A , """w""" ) as writer:
for key in sorted(A ):
if key not in ["log", "progress_bar"]:
rank_zero_info("""{} = {}\n""".format(A , str(metrics[key] ) ) )
writer.write("""{} = {}\n""".format(A , str(metrics[key] ) ) )
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> None:
# To allow all pl args uncomment the following line
# parser = pl.Trainer.add_argparse_args(parser)
parser.add_argument(
"""--output_dir""" , default=str(Path(SCREAMING_SNAKE_CASE__).parent / """test_run""" / """model_checkpoints""") , type=SCREAMING_SNAKE_CASE__ , help="""The output directory where the model predictions and checkpoints will be written.""" , )
parser.add_argument(
"""--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , )
parser.add_argument(
"""--fp16_opt_level""" , type=SCREAMING_SNAKE_CASE__ , default="""O2""" , help=(
"""For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."""
"""See details at https://nvidia.github.io/apex/amp.html"""
) , )
parser.add_argument("""--n_tpu_cores""" , dest="""tpu_cores""" , type=SCREAMING_SNAKE_CASE__)
parser.add_argument("""--max_grad_norm""" , dest="""gradient_clip_val""" , default=1.0 , type=SCREAMING_SNAKE_CASE__ , help="""Max gradient norm""")
parser.add_argument("""--do_train""" , action="""store_true""" , help="""Whether to run training.""")
parser.add_argument("""--do_predict""" , action="""store_true""" , help="""Whether to run predictions on the test set.""")
parser.add_argument(
"""--gradient_accumulation_steps""" , dest="""accumulate_grad_batches""" , type=SCREAMING_SNAKE_CASE__ , default=1 , help="""Number of updates steps to accumulate before performing a backward/update pass.""" , )
parser.add_argument("""--seed""" , type=SCREAMING_SNAKE_CASE__ , default=42 , help="""random seed for initialization""")
parser.add_argument(
"""--data_dir""" , default=str(Path(SCREAMING_SNAKE_CASE__).parent / """test_run""" / """dummy-train-data""") , type=SCREAMING_SNAKE_CASE__ , help="""The input data dir. Should contain the training files for the CoNLL-2003 NER task.""" , )
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=[] , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , **SCREAMING_SNAKE_CASE__ , ) -> Union[str, Any]:
pl.seed_everything(args.seed)
# init model
__snake_case: List[str] = Path(model.hparams.output_dir)
odir.mkdir(exist_ok=SCREAMING_SNAKE_CASE__)
# add custom checkpoints
if checkpoint_callback is None:
__snake_case: Union[str, Any] = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix="""checkpoint""" , monitor="""val_loss""" , mode="""min""" , save_top_k=1)
if early_stopping_callback:
extra_callbacks.append(SCREAMING_SNAKE_CASE__)
if logging_callback is None:
__snake_case: Union[str, Any] = LoggingCallback()
__snake_case: Optional[int] = {}
if args.fpaa:
__snake_case: str = 16
if args.gpus > 1:
__snake_case: Dict = """auto"""
__snake_case: int = """ddp"""
__snake_case: List[Any] = args.accumulate_grad_batches
__snake_case: Union[str, Any] = None
__snake_case: Optional[int] = """auto"""
__snake_case: Any = pl.Trainer.from_argparse_args(
SCREAMING_SNAKE_CASE__ , weights_summary=SCREAMING_SNAKE_CASE__ , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=SCREAMING_SNAKE_CASE__ , val_check_interval=1 , num_sanity_val_steps=2 , **SCREAMING_SNAKE_CASE__ , )
if args.do_train:
trainer.fit(SCREAMING_SNAKE_CASE__)
else:
print("""RAG modeling tests with new set functions successfuly executed!""")
return trainer
| 293 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 293 | 1 |
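`configure_optimizers` in the Lightning module above excludes biases and LayerNorm weights from weight decay, a common transformer fine-tuning convention. A self-contained sketch of the same grouping on a toy model (`TinyEncoder` is a stand-in; its `LayerNorm` attribute name is chosen so parameter names match the HF-style `no_decay` patterns):

import torch
from torch import nn


class TinyEncoder(nn.Module):
    def __init__(self):
        super().__init__()
        self.proj = nn.Linear(4, 4)
        self.LayerNorm = nn.LayerNorm(4)

    def forward(self, x):
        return self.LayerNorm(self.proj(x))


model = TinyEncoder()
no_decay = ("bias", "LayerNorm.weight")
param_groups = [
    {  # decayed: everything except biases and LayerNorm weights
        "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
        "weight_decay": 0.01,
    },
    {  # exempt from decay
        "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
        "weight_decay": 0.0,
    },
]
optimizer = torch.optim.AdamW(param_groups, lr=5e-5)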
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
a__ : Optional[Any] =logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
@dataclass
class snake_case :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[str] =field(
default="cifar10" , metadata={"help": "Name of a dataset from the datasets package"} )
SCREAMING_SNAKE_CASE_ : Optional[str] =field(
default=_a , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
SCREAMING_SNAKE_CASE_ : Optional[str] =field(
default=_a , metadata={"help": "The column name of the images in the files."} )
SCREAMING_SNAKE_CASE_ : Optional[str] =field(default=_a , metadata={"help": "A folder containing the training data."} )
SCREAMING_SNAKE_CASE_ : Optional[str] =field(default=_a , metadata={"help": "A folder containing the validation data."} )
SCREAMING_SNAKE_CASE_ : Optional[float] =field(
default=0.15 , metadata={"help": "Percent to split off of train for validation."} )
SCREAMING_SNAKE_CASE_ : Optional[int] =field(
default=_a , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
SCREAMING_SNAKE_CASE_ : Optional[int] =field(
default=_a , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def _lowerCamelCase ( self : Dict ):
__UpperCamelCase = {}
if self.train_dir is not None:
__UpperCamelCase = self.train_dir
if self.validation_dir is not None:
__UpperCamelCase = self.validation_dir
__UpperCamelCase = data_files if data_files else None
@dataclass
class snake_case :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str =field(
default=_a , metadata={
"help": (
"The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
)
} , )
SCREAMING_SNAKE_CASE_ : Optional[str] =field(
default=_a , metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"} )
SCREAMING_SNAKE_CASE_ : Optional[str] =field(
default=_a , metadata={
"help": (
"Override some existing default config settings when a model is trained from scratch. Example: "
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
)
} , )
SCREAMING_SNAKE_CASE_ : Optional[str] =field(
default=_a , metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} )
SCREAMING_SNAKE_CASE_ : str =field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
SCREAMING_SNAKE_CASE_ : str =field(default=_a , metadata={"help": "Name or path of preprocessor config."} )
SCREAMING_SNAKE_CASE_ : bool =field(
default=_a , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
SCREAMING_SNAKE_CASE_ : float =field(
default=0.75 , metadata={"help": "The ratio of the number of masked tokens in the input sequence."} )
SCREAMING_SNAKE_CASE_ : bool =field(
default=_a , metadata={"help": "Whether or not to train with normalized pixel values as target."} )
@dataclass
class snake_case ( _a ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : float =field(
default=1e-3 , metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."} )
def lowercase__ ( __lowercase : List[str] ) -> Dict:
"""simple docstring"""
__UpperCamelCase = torch.stack([example['pixel_values'] for example in examples] )
return {"pixel_values": pixel_values}
def lowercase__ ( ) -> Tuple:
"""simple docstring"""
__UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__UpperCamelCase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_mae' , __lowercase , __lowercase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__UpperCamelCase = training_args.get_process_log_level()
logger.setLevel(__lowercase )
transformers.utils.logging.set_verbosity(__lowercase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
__UpperCamelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__UpperCamelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset.
__UpperCamelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
__UpperCamelCase = None if """validation""" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , __lowercase ) and data_args.train_val_split > 0.0:
__UpperCamelCase = ds["""train"""].train_test_split(data_args.train_val_split )
__UpperCamelCase = split["""train"""]
__UpperCamelCase = split["""test"""]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__UpperCamelCase = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name:
__UpperCamelCase = ViTMAEConfig.from_pretrained(model_args.config_name , **__lowercase )
elif model_args.model_name_or_path:
__UpperCamelCase = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **__lowercase )
else:
__UpperCamelCase = ViTMAEConfig()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(F'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(F'''New config: {config}''' )
# adapt config
config.update(
{
'mask_ratio': model_args.mask_ratio,
'norm_pix_loss': model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
__UpperCamelCase = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **__lowercase )
elif model_args.model_name_or_path:
__UpperCamelCase = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **__lowercase )
else:
__UpperCamelCase = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
__UpperCamelCase = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('Training new model from scratch' )
__UpperCamelCase = ViTMAEForPreTraining(__lowercase )
if training_args.do_train:
__UpperCamelCase = ds["""train"""].column_names
else:
__UpperCamelCase = ds["""validation"""].column_names
if data_args.image_column_name is not None:
__UpperCamelCase = data_args.image_column_name
elif "image" in column_names:
__UpperCamelCase = """image"""
elif "img" in column_names:
__UpperCamelCase = """img"""
else:
__UpperCamelCase = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
__UpperCamelCase = image_processor.size["""shortest_edge"""]
else:
__UpperCamelCase = (image_processor.size["""height"""], image_processor.size["""width"""])
__UpperCamelCase = Compose(
[
Lambda(lambda __lowercase : img.convert('RGB' ) if img.mode != "RGB" else img ),
RandomResizedCrop(__lowercase , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(__lowercase : Union[str, Any] ):
__UpperCamelCase = [transforms(__lowercase ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
__UpperCamelCase = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(__lowercase )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
__UpperCamelCase = (
ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(__lowercase )
# Compute absolute learning rate
__UpperCamelCase = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
__UpperCamelCase = training_args.base_learning_rate * total_train_batch_size / 256
# Initialize our trainer
__UpperCamelCase = Trainer(
model=__lowercase , args=__lowercase , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=__lowercase , data_collator=__lowercase , )
# Training
if training_args.do_train:
__UpperCamelCase = None
if training_args.resume_from_checkpoint is not None:
__UpperCamelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__UpperCamelCase = last_checkpoint
__UpperCamelCase = trainer.train(resume_from_checkpoint=__lowercase )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
__UpperCamelCase = trainer.evaluate()
trainer.log_metrics('eval' , __lowercase )
trainer.save_metrics('eval' , __lowercase )
# Write model card and (optionally) push to hub
__UpperCamelCase = {
"""tasks""": """masked-auto-encoding""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""masked-auto-encoding"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**__lowercase )
else:
trainer.create_model_card(**__lowercase )
def lowercase__ ( __lowercase : int ) -> Union[str, Any]:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 53 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ : str = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Tuple = [
'''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OPTForCausalLM''',
'''OPTModel''',
'''OPTPreTrainedModel''',
'''OPTForSequenceClassification''',
'''OPTForQuestionAnswering''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Any = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[Any] = [
'''FlaxOPTForCausalLM''',
'''FlaxOPTModel''',
'''FlaxOPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
UpperCAmelCase_ : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 38 | 0 |
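The MAE pretraining script above applies the linear learning-rate scaling rule, absolute_lr = base_lr * total_batch_size / 256, where the total batch size is the per-device batch times gradient accumulation times world size. Worked out as a tiny helper (the function and argument names are ours):

def absolute_lr(base_lr: float, per_device_batch: int, grad_accum: int, world_size: int) -> float:
    total_batch = per_device_batch * grad_accum * world_size
    return base_lr * total_batch / 256


# 32 per device * 2 accumulation steps * 4 processes = 256, so the base rate is unchanged
assert absolute_lr(1e-3, 32, 2, 4) == 1e-3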
"""simple docstring"""
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase = []
UpperCAmelCase = []
UpperCAmelCase = []
for rt in rc.restypes:
UpperCAmelCase = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
UpperCAmelCase = {name: i for i, name in enumerate(_snake_case )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
UpperCAmelCase = torch.tensor(
_snake_case , dtype=torch.intaa , device=protein["""aatype"""].device , )
UpperCAmelCase = torch.tensor(
_snake_case , dtype=torch.intaa , device=protein["""aatype"""].device , )
UpperCAmelCase = torch.tensor(
_snake_case , dtype=torch.floataa , device=protein["""aatype"""].device , )
UpperCAmelCase = protein["""aatype"""].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
UpperCAmelCase = restype_atomaa_to_atomaa[protein_aatype]
UpperCAmelCase = restype_atomaa_mask[protein_aatype]
UpperCAmelCase = residx_atomaa_mask
UpperCAmelCase = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
UpperCAmelCase = restype_atomaa_to_atomaa[protein_aatype]
UpperCAmelCase = residx_atomaa_to_atomaa.long()
# create the corresponding mask
UpperCAmelCase = torch.zeros([21, 37] , dtype=torch.floataa , device=protein["""aatype"""].device )
for restype, restype_letter in enumerate(rc.restypes ):
UpperCAmelCase = rc.restype_atoa[restype_letter]
UpperCAmelCase = rc.residue_atoms[restype_name]
for atom_name in atom_names:
UpperCAmelCase = rc.atom_order[atom_name]
UpperCAmelCase = 1
UpperCAmelCase = restype_atomaa_mask[protein_aatype]
UpperCAmelCase = residx_atomaa_mask
return protein
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase = tree_map(lambda _snake_case : torch.tensor(_snake_case , device=batch["""aatype"""].device ) , _snake_case , np.ndarray )
UpperCAmelCase = tensor_tree_map(lambda _snake_case : np.array(_snake_case ) , make_atomaa_masks(_snake_case ) )
return out
| 234 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class ScoreSdeVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2_000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step (Langevin dynamics)
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step (discretized reverse SDE)
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)
            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
| 234 | 1 |
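Condensed, the sampling loop in the pipeline above alternates Langevin corrector steps with a reverse-SDE predictor step at each noise level. A schematic sketch whose calls mirror the cell above (`unet` and `scheduler` are stand-ins for the real objects):

import torch


def pc_sample(unet, scheduler, sample, generator=None):
    for i, t in enumerate(scheduler.timesteps):
        sigma_t = scheduler.sigmas[i] * torch.ones(sample.shape[0], device=sample.device)
        for _ in range(scheduler.config.correct_steps):  # corrector: Langevin MCMC
            score = unet(sample, sigma_t).sample
            sample = scheduler.step_correct(score, sample, generator=generator).prev_sample
        score = unet(sample, sigma_t).sample  # predictor: reverse SDE step
        sample = scheduler.step_pred(score, t, sample, generator=generator).prev_sample
    return sample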
from __future__ import annotations

from collections import deque


class Automaton:
    """Aho-Corasick automaton for matching many keywords in one pass."""

    def __init__(self, keywords: list[str]) -> None:
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )
        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque[int] = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        result: dict[str, list[int]] = {}  # keyword -> list of occurrence indices
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
            for key in self.adlist[current_state]["output"]:
                if key not in result:
                    result[key] = []
                result[key].append(i - len(key) + 1)
        return result


if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 210 |
import gc
import threading
import time

import psutil
import torch


class PeakCPUMemory:
    """Track peak resident CPU memory from a background polling thread."""

    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures


def end_measure(start_measures):
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem, reported as a delta in MiB
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem, reported as deltas in MiB
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures


def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
| 210 | 1 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ = True , lowerCAmelCase__ = math.inf , lowerCAmelCase__ = -math.inf , lowerCAmelCase__ = math.inf , lowerCAmelCase__ = -math.inf , lowerCAmelCase__ = False , lowerCAmelCase__ = 100 , lowerCAmelCase__ = 0.01 , lowerCAmelCase__ = 1 , ):
'''simple docstring'''
lowercase = False
lowercase = search_prob
lowercase = start_temperate
lowercase = []
lowercase = 0
lowercase = None
while not search_end:
lowercase = current_state.score()
if best_state is None or current_score > best_state.score():
lowercase = current_state
scores.append(lowerCAmelCase__ )
iterations += 1
lowercase = None
lowercase = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
lowercase = random.randint(0 , len(lowerCAmelCase__ ) - 1 ) # picking a random neighbor
lowercase = neighbors.pop(lowerCAmelCase__ )
lowercase = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
lowercase = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
lowercase = picked_neighbor
else:
lowercase = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
lowercase = picked_neighbor
lowercase = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
lowercase = True
else:
lowercase = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(lowerCAmelCase__ ) , lowerCAmelCase__ )
plt.xlabel('''Iterations''' )
plt.ylabel('''Function values''' )
plt.show()
return best_state
if __name__ == "__main__":
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
lowercase__ :Union[str, Any] = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowercase__ :int = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
F'and 50 > y > - 5 found via hill climbing: {local_min.score()}'
)
# starting the problem with initial coordinates (12, 47)
lowercase__ :int = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowercase__ :str = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
F'and 50 > y > - 5 found via hill climbing: {local_min.score()}'
)
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
return (3 * x**2) - (6 * y)
lowercase__ :Any = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowercase__ :Union[str, Any] = simulated_annealing(prob, find_max=False, visualization=True)
print(
"The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
F'{local_min.score()}'
)
lowercase__ :List[str] = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowercase__ :List[Any] = simulated_annealing(prob, find_max=True, visualization=True)
print(
"The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
F'{local_min.score()}'
)
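    # A minimal standalone sketch (illustrative, not part of the original file) of
    # the Metropolis acceptance rule the loop above applies: improving moves are
    # always taken, worsening moves are accepted with probability e^(change / T),
    # so the search gets greedier as the temperature falls.
    def accept_move(change, temperature):
        if change > 0:  # improvement: always accept
            return True
        return random.random() < math.e ** (change / temperature)

    random.seed(0)
    for temp in (100.0, 1.0):
        accepted = sum(accept_move(-2.0, temp) for _ in range(10_000))
        print(f"T={temp}: accepted {accepted / 10_000:.2%} of worsening moves")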
| 97 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase :
def __init__( self ,A__ ,A__=1_3 ,A__=7 ,A__=True ,A__=True ,A__=True ,A__=True ,A__=9_9 ,A__=1_6 ,A__=3_6 ,A__=6 ,A__=6 ,A__=6 ,A__=3_7 ,A__="gelu" ,A__=0.1 ,A__=0.1 ,A__=5_1_2 ,A__=1_6 ,A__=2 ,A__=0.02 ,A__=3 ,A__=4 ,A__=None ,):
lowercase = parent
lowercase = batch_size
lowercase = seq_length
lowercase = is_training
lowercase = use_input_mask
lowercase = use_token_type_ids
lowercase = use_labels
lowercase = vocab_size
lowercase = embedding_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_hidden_groups
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = type_vocab_size
lowercase = type_sequence_label_size
lowercase = initializer_range
lowercase = num_labels
lowercase = num_choices
lowercase = scope
def A__ ( self):
lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size)
lowercase = None
if self.use_input_mask:
lowercase = random_attention_mask([self.batch_size, self.seq_length])
lowercase = None
if self.use_token_type_ids:
lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size)
lowercase = None
lowercase = None
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size)
lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels)
lowercase = ids_tensor([self.batch_size] ,self.num_choices)
lowercase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self):
return AlbertConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,num_hidden_groups=self.num_hidden_groups ,)
def A__ ( self ,A__ ,A__ ,A__ ,A__ ,A__ ,A__ ,A__):
lowercase = AlbertModel(config=A__)
model.to(A__)
model.eval()
lowercase = model(A__ ,attention_mask=A__ ,token_type_ids=A__)
lowercase = model(A__ ,token_type_ids=A__)
lowercase = model(A__)
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size))
def A__ ( self ,A__ ,A__ ,A__ ,A__ ,A__ ,A__ ,A__):
lowercase = AlbertForPreTraining(config=A__)
model.to(A__)
model.eval()
lowercase = model(
A__ ,attention_mask=A__ ,token_type_ids=A__ ,labels=A__ ,sentence_order_label=A__ ,)
self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.sop_logits.shape ,(self.batch_size, config.num_labels))
def A__ ( self ,A__ ,A__ ,A__ ,A__ ,A__ ,A__ ,A__):
lowercase = AlbertForMaskedLM(config=A__)
model.to(A__)
model.eval()
lowercase = model(A__ ,attention_mask=A__ ,token_type_ids=A__ ,labels=A__)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size))
def A__ ( self ,A__ ,A__ ,A__ ,A__ ,A__ ,A__ ,A__):
lowercase = AlbertForQuestionAnswering(config=A__)
model.to(A__)
model.eval()
lowercase = model(
A__ ,attention_mask=A__ ,token_type_ids=A__ ,start_positions=A__ ,end_positions=A__ ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length))
def A__ ( self ,A__ ,A__ ,A__ ,A__ ,A__ ,A__ ,A__):
lowercase = self.num_labels
lowercase = AlbertForSequenceClassification(A__)
model.to(A__)
model.eval()
lowercase = model(A__ ,attention_mask=A__ ,token_type_ids=A__ ,labels=A__)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels))
def A__ ( self ,A__ ,A__ ,A__ ,A__ ,A__ ,A__ ,A__):
lowercase = self.num_labels
lowercase = AlbertForTokenClassification(config=A__)
model.to(A__)
model.eval()
lowercase = model(A__ ,attention_mask=A__ ,token_type_ids=A__ ,labels=A__)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels))
def A__ ( self ,A__ ,A__ ,A__ ,A__ ,A__ ,A__ ,A__):
lowercase = self.num_choices
lowercase = AlbertForMultipleChoice(config=A__)
model.to(A__)
model.eval()
lowercase = input_ids.unsqueeze(1).expand(-1 ,self.num_choices ,-1).contiguous()
lowercase = token_type_ids.unsqueeze(1).expand(-1 ,self.num_choices ,-1).contiguous()
lowercase = input_mask.unsqueeze(1).expand(-1 ,self.num_choices ,-1).contiguous()
lowercase = model(
A__ ,attention_mask=A__ ,token_type_ids=A__ ,labels=A__ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices))
def A__ ( self):
lowercase = self.prepare_config_and_inputs()
(
(
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) ,
) = config_and_inputs
lowercase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowercase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
lowercase_ : Union[str, Any] =(
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowercase_ : int =(
{
'''feature-extraction''': AlbertModel,
'''fill-mask''': AlbertForMaskedLM,
'''question-answering''': AlbertForQuestionAnswering,
'''text-classification''': AlbertForSequenceClassification,
'''token-classification''': AlbertForTokenClassification,
'''zero-shot''': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase_ : str =True
def A__ ( self ,A__ ,A__ ,A__=False):
lowercase = super()._prepare_for_class(A__ ,A__ ,return_labels=A__)
if return_labels:
if model_class in get_values(A__):
lowercase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=A__)
lowercase = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=A__)
return inputs_dict
def A__ ( self):
lowercase = AlbertModelTester(self)
lowercase = ConfigTester(self ,config_class=A__ ,hidden_size=3_7)
def A__ ( self):
self.config_tester.run_common_tests()
def A__ ( self):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A__)
def A__ ( self):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*A__)
def A__ ( self):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A__)
def A__ ( self):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*A__)
def A__ ( self):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A__)
def A__ ( self):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A__)
def A__ ( self):
lowercase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase = type
self.model_tester.create_and_check_model(*A__)
@slow
def A__ ( self):
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = AlbertModel.from_pretrained(A__)
self.assertIsNotNone(A__)
@require_torch
class lowercase ( unittest.TestCase ):
@slow
def A__ ( self):
lowercase = AlbertModel.from_pretrained('''albert-base-v2''')
lowercase = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]])
lowercase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
with torch.no_grad():
lowercase = model(A__ ,attention_mask=A__)[0]
lowercase = torch.Size((1, 1_1, 7_6_8))
self.assertEqual(output.shape ,A__)
lowercase = torch.tensor(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]])
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,A__ ,atol=1E-4))
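# A quick, download-free variant of the shape check above: run a tiny randomly
# initialised ALBERT instead of the pretrained checkpoint (all sizes here are
# illustrative, chosen only to keep the model small).
def _tiny_albert_shape_check():
    config = AlbertConfig(
        vocab_size=99,
        embedding_size=16,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
    )
    model = AlbertModel(config)
    model.eval()
    input_ids = torch.randint(0, config.vocab_size, (1, 11))
    attention_mask = torch.ones_like(input_ids)
    with torch.no_grad():
        output = model(input_ids, attention_mask=attention_mask)[0]
    assert output.shape == torch.Size((1, 11, config.hidden_size))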
| 97 | 1 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class A__ ( __snake_case ):
def __init__( self , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : List[str] = params
UpperCamelCase : Union[str, Any] = np.array(A_ )
UpperCamelCase : Optional[int] = np.array([len(A_ ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self , A_ ):
'''simple docstring'''
return (self.token_ids[index], self.lengths[index])
def __len__( self ):
'''simple docstring'''
return len(self.lengths )
def __UpperCamelCase( self ):
'''simple docstring'''
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = self.params.max_model_input_size
UpperCamelCase : Dict = self.lengths > max_len
logger.info(F"""Splitting {sum(A_ )} too long sequences.""" )
def divide_chunks(A_ , A_ ):
return [l[i : i + n] for i in range(0 , len(A_ ) , A_ )]
UpperCamelCase : List[str] = []
UpperCamelCase : Tuple = []
if self.params.mlm:
UpperCamelCase , UpperCamelCase : Dict = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
else:
UpperCamelCase , UpperCamelCase : Dict = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
UpperCamelCase : Dict = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
UpperCamelCase : Union[str, Any] = np.insert(A_ , 0 , A_ )
if sub_s[-1] != sep_id:
UpperCamelCase : Optional[int] = np.insert(A_ , len(A_ ) , A_ )
assert len(A_ ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(A_ )
new_tok_ids.extend(A_ )
new_lengths.extend([len(A_ ) for l in sub_seqs] )
UpperCamelCase : Union[str, Any] = np.array(A_ )
UpperCamelCase : Union[str, Any] = np.array(A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Tuple = len(self )
UpperCamelCase : Dict = self.lengths > 11
UpperCamelCase : List[str] = self.token_ids[indices]
UpperCamelCase : Tuple = self.lengths[indices]
UpperCamelCase : Optional[Any] = len(self )
logger.info(F"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""" )
def __UpperCamelCase( self ):
'''simple docstring'''
if "unk_token" not in self.params.special_tok_ids:
return
else:
UpperCamelCase : List[str] = self.params.special_tok_ids["unk_token"]
UpperCamelCase : int = len(self )
UpperCamelCase : str = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
UpperCamelCase : List[Any] = (unk_occs / self.lengths) < 0.5
UpperCamelCase : List[Any] = self.token_ids[indices]
UpperCamelCase : str = self.lengths[indices]
UpperCamelCase : Dict = len(self )
logger.info(F"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""" )
def __UpperCamelCase( self ):
'''simple docstring'''
if not self.params.is_master:
return
logger.info(F"""{len(self )} sequences""" )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
UpperCamelCase : str = [t[0] for t in batch]
UpperCamelCase : str = [t[1] for t in batch]
assert len(A_ ) == len(A_ )
# Max for paddings
UpperCamelCase : Union[str, Any] = max(A_ )
# Pad token ids
if self.params.mlm:
UpperCamelCase : List[Any] = self.params.special_tok_ids["pad_token"]
else:
UpperCamelCase : Dict = self.params.special_tok_ids["unk_token"]
UpperCamelCase : Optional[int] = [list(t.astype(A_ ) ) + [pad_idx] * (max_seq_len_ - len(A_ )) for t in token_ids]
assert len(tk_ ) == len(A_ )
assert all(len(A_ ) == max_seq_len_ for t in tk_ )
UpperCamelCase : Any = torch.tensor(tk_ ) # (bs, max_seq_len_)
UpperCamelCase : Any = torch.tensor(A_ ) # (bs)
return tk_t, lg_t
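# Standalone sketch of the pad-to-longest step in the batch method above
# (the pad index and sample data are illustrative):
def _pad_batch_demo():
    token_ids = [[5, 6, 7], [8, 9], [10]]
    pad_idx = 0
    max_seq_len_ = max(len(t) for t in token_ids)
    padded = [t + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
    tk_t = torch.tensor(padded)  # shape (3, 3); short rows right-padded with 0
    lg_t = torch.tensor([len(t) for t in token_ids])  # tensor([3, 2, 1])
    return tk_t, lg_t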
| 52 |
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
lowercase : Optional[int] = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
lowercase : Optional[Any] = '\nArgs:\n    predictions (`list` of `int`): Predicted class labels, as returned by a model.\n    references (`list` of `int`): Ground truth labels.\n    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n    Example 1-A simple example using only predictions and references.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n\n    Example 2-The same as Example 1, but that also returns the `p-value`.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n        >>> print(sorted(list(results.keys())))\n        [\'p-value\', \'pearsonr\']\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n        >>> print(round(results[\'p-value\'], 2))\n        0.15\n'
lowercase : str = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]:
"""simple docstring"""
if return_pvalue:
A : Union[str, Any] = pearsonr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )[0] )}
| 3 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase = {
'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['RemBertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['RemBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RemBertForCausalLM',
'RemBertForMaskedLM',
'RemBertForMultipleChoice',
'RemBertForQuestionAnswering',
'RemBertForSequenceClassification',
'RemBertForTokenClassification',
'RemBertLayer',
'RemBertModel',
'RemBertPreTrainedModel',
'load_tf_weights_in_rembert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRemBertForCausalLM',
'TFRemBertForMaskedLM',
'TFRemBertForMultipleChoice',
'TFRemBertForQuestionAnswering',
'TFRemBertForSequenceClassification',
'TFRemBertForTokenClassification',
'TFRemBertLayer',
'TFRemBertModel',
'TFRemBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
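# Minimal sketch of the lazy-module idea this file relies on: attribute access
# triggers the real import on first use. This is a simplified illustration,
# not the actual transformers._LazyModule implementation.
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported object to the module that actually defines it.
        self._object_to_module = {
            obj: mod for mod, objs in import_structure.items() for obj in objs
        }

    def __getattr__(self, name):
        if name not in self._object_to_module:
            raise AttributeError(name)
        module = importlib.import_module(self._object_to_module[name])
        return getattr(module, name)


# Usage sketch: "loads" is resolved (and json actually imported) only here.
_lazy_demo = _LazyModuleSketch("demo", {"json": ["loads"], "math": ["sqrt"]})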
| 145 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = '▁'
__UpperCAmelCase = {'vocab_file': 'sentencepiece.bpe.model'}
__UpperCAmelCase = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
}
}
__UpperCAmelCase = {
'facebook/mbart-large-en-ro': 1024,
'facebook/mbart-large-cc25': 1024,
}
# fmt: off
__UpperCAmelCase = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : List[Any] = VOCAB_FILES_NAMES
_snake_case : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : Tuple = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Any = ['''input_ids''', '''attention_mask''']
_snake_case : List[int] = []
_snake_case : List[int] = []
def __init__( self , _UpperCamelCase , _UpperCamelCase="<s>" , _UpperCamelCase="</s>" , _UpperCamelCase="</s>" , _UpperCamelCase="<s>" , _UpperCamelCase="<unk>" , _UpperCamelCase="<pad>" , _UpperCamelCase="<mask>" , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase = None , _UpperCamelCase=None , **_UpperCamelCase , ) -> List[str]:
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase_ : List[Any] = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else mask_token
UpperCAmelCase_ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , cls_token=_UpperCamelCase , pad_token=_UpperCamelCase , mask_token=_UpperCamelCase , tokenizer_file=_UpperCamelCase , src_lang=_UpperCamelCase , tgt_lang=_UpperCamelCase , additional_special_tokens=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCamelCase , )
UpperCAmelCase_ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_UpperCamelCase ) )
UpperCAmelCase_ : int = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
UpperCAmelCase_ : List[str] = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCAmelCase_ : str = 1
UpperCAmelCase_ : Optional[int] = len(self.sp_model )
UpperCAmelCase_ : str = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_UpperCamelCase )
}
UpperCAmelCase_ : int = {v: k for k, v in self.lang_code_to_id.items()}
UpperCAmelCase_ : List[Any] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
UpperCAmelCase_ : Tuple = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
UpperCAmelCase_ : int = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
UpperCAmelCase_ : Any = src_lang if src_lang is not None else 'en_XX'
UpperCAmelCase_ : Any = self.lang_code_to_id[self._src_lang]
UpperCAmelCase_ : Dict = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ) -> Dict:
UpperCAmelCase_ : Optional[int] = self.__dict__.copy()
UpperCAmelCase_ : str = None
UpperCAmelCase_ : str = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , _UpperCamelCase ) -> Dict:
UpperCAmelCase_ : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
UpperCAmelCase_ : Any = {}
UpperCAmelCase_ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def __UpperCAmelCase ( self ) -> Tuple:
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def __UpperCAmelCase ( self ) -> str:
return self._src_lang
@src_lang.setter
def __UpperCAmelCase ( self , _UpperCamelCase ) -> None:
UpperCAmelCase_ : Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase )
UpperCAmelCase_ : Tuple = [1] * len(self.prefix_tokens )
UpperCAmelCase_ : Dict = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(_UpperCamelCase )) + suffix_ones
return prefix_ones + ([0] * len(_UpperCamelCase )) + ([0] * len(_UpperCamelCase )) + suffix_ones
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> List[int]:
UpperCAmelCase_ : int = [self.sep_token_id]
UpperCAmelCase_ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ) -> int:
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
UpperCAmelCase_ : Optional[int] = src_lang
UpperCAmelCase_ : Dict = self(_UpperCamelCase , add_special_tokens=_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase )
UpperCAmelCase_ : int = self.convert_tokens_to_ids(_UpperCamelCase )
UpperCAmelCase_ : Any = tgt_lang_id
return inputs
def __UpperCAmelCase ( self ) -> str:
UpperCAmelCase_ : str = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __UpperCAmelCase ( self , _UpperCamelCase ) -> List[str]:
return self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Tuple:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCAmelCase_ : Dict = self.sp_model.PieceToId(_UpperCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __UpperCAmelCase ( self , _UpperCamelCase ) -> str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Optional[int]:
UpperCAmelCase_ : Optional[int] = ''.join(_UpperCamelCase ).replace(_UpperCamelCase , ' ' ).strip()
return out_string
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> Tuple[str]:
if not os.path.isdir(_UpperCamelCase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCAmelCase_ : List[Any] = os.path.join(
_UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCamelCase , 'wb' ) as fi:
UpperCAmelCase_ : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(_UpperCamelCase )
return (out_vocab_file,)
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = "en_XX" , _UpperCamelCase = None , _UpperCamelCase = "ro_RO" , **_UpperCamelCase , ) -> BatchEncoding:
UpperCAmelCase_ : Union[str, Any] = src_lang
UpperCAmelCase_ : Dict = tgt_lang
return super().prepare_seqaseq_batch(_UpperCamelCase , _UpperCamelCase , **_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
return self.set_src_lang_special_tokens(self.src_lang )
def __UpperCAmelCase ( self ) -> Optional[Any]:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __UpperCAmelCase ( self , _UpperCamelCase ) -> None:
UpperCAmelCase_ : Any = self.lang_code_to_id[src_lang]
UpperCAmelCase_ : Optional[Any] = []
UpperCAmelCase_ : str = [self.eos_token_id, self.cur_lang_code]
def __UpperCAmelCase ( self , _UpperCamelCase ) -> None:
UpperCAmelCase_ : Any = self.lang_code_to_id[lang]
UpperCAmelCase_ : Dict = []
UpperCAmelCase_ : int = [self.eos_token_id, self.cur_lang_code]
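# Standalone sketch of the fairseq/SPM id alignment handled above: the first
# four ids are pinned, every other SentencePiece id is shifted by the fairseq
# offset, and SPM id 0 (its <unk>) maps to the fairseq unk id. The toy piece
# table below is illustrative.
def _fairseq_align_demo():
    fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
    fairseq_offset = 1
    toy_spm_piece_to_id = {',': 3, '.': 4}

    def token_to_id(token):
        if token in fairseq_tokens_to_ids:
            return fairseq_tokens_to_ids[token]
        spm_id = toy_spm_piece_to_id.get(token, 0)
        return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids['<unk>']

    return token_to_id(','), token_to_id('<oov>')  # (4, 3)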
| 145 | 1 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : Tuple = []
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight",
F"stage{idx}.patch_embed.proj.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias",
F"stage{idx}.patch_embed.proj.bias",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight",
F"stage{idx}.patch_embed.norm.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias",
F"stage{idx}.patch_embed.norm.bias",
) )
return embed
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Optional[Any] = []
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_q.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_q.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_k.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_k.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_v.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_v.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight",
F"stage{idx}.blocks.{cnt}.attn.proj.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias",
F"stage{idx}.blocks.{cnt}.attn.proj.bias",
) )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc2.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight", F"stage{idx}.blocks.{cnt}.norm1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias", F"stage{idx}.blocks.{cnt}.norm1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight", F"stage{idx}.blocks.{cnt}.norm2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias", F"stage{idx}.blocks.{cnt}.norm2.bias") )
return attention_weights
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : Tuple = []
token.append((F"cvt.encoder.stages.{idx}.cls_token", 'stage2.cls_token') )
return token
def __lowerCAmelCase ():
__lowerCAmelCase : List[str] = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : List[Any] = 'imagenet-1k-id2label.json'
__lowerCAmelCase : str = 1000
__lowerCAmelCase : int = 'huggingface/label-files'
__lowerCAmelCase : List[str] = num_labels
__lowerCAmelCase : Dict = json.load(open(cached_download(hf_hub_url(snake_case_ , snake_case_ , repo_type='dataset' ) ) , 'r' ) )
__lowerCAmelCase : Dict = {int(snake_case_ ): v for k, v in idalabel.items()}
__lowerCAmelCase : str = idalabel
__lowerCAmelCase : Tuple = {v: k for k, v in idalabel.items()}
__lowerCAmelCase : Union[str, Any] = CvtConfig(num_labels=snake_case_ , idalabel=snake_case_ , labelaid=snake_case_ )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('/' , 1 )[-1][4:6] == "13":
__lowerCAmelCase : str = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('/' , 1 )[-1][4:6] == "21":
__lowerCAmelCase : List[Any] = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
__lowerCAmelCase : str = [2, 2, 20]
__lowerCAmelCase : Union[str, Any] = [3, 12, 16]
__lowerCAmelCase : str = [192, 768, 1024]
__lowerCAmelCase : str = CvtForImageClassification(snake_case_ )
__lowerCAmelCase : Optional[Any] = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
__lowerCAmelCase : Union[str, Any] = image_size
__lowerCAmelCase : Union[str, Any] = torch.load(snake_case_ , map_location=torch.device('cpu' ) )
__lowerCAmelCase : Any = OrderedDict()
__lowerCAmelCase : Union[str, Any] = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
__lowerCAmelCase : Union[str, Any] = list_of_state_dict + cls_token(snake_case_ )
__lowerCAmelCase : Union[str, Any] = list_of_state_dict + embeddings(snake_case_ )
for cnt in range(config.depth[idx] ):
__lowerCAmelCase : str = list_of_state_dict + attention(snake_case_ , snake_case_ )
__lowerCAmelCase : Tuple = list_of_state_dict + final()
for gg in list_of_state_dict:
print(snake_case_ )
for i in range(len(snake_case_ ) ):
__lowerCAmelCase : Optional[Any] = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(snake_case_ )
model.save_pretrained(snake_case_ )
image_processor.save_pretrained(snake_case_ )
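# Sketch of how a (new_name, old_name) rename list like list_of_state_dict is
# applied to checkpoint weights (tensor name and shape below are illustrative):
def _apply_rename_demo():
    original_weights = {'stage0.patch_embed.proj.bias': torch.zeros(64)}
    rename_pairs = [
        ('cvt.encoder.stages.0.embedding.convolution_embeddings.projection.bias',
         'stage0.patch_embed.proj.bias'),
    ]
    renamed = OrderedDict()
    for new_name, old_name in rename_pairs:
        renamed[new_name] = original_weights[old_name]
    return renamed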
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
"""--cvt_model""",
default="""cvt-w24""",
type=str,
help="""Name of the cvt model you\'d like to convert.""",
)
parser.add_argument(
"""--image_size""",
default=384,
type=int,
help="""Input Image Size""",
)
parser.add_argument(
"""--cvt_file_name""",
default=R"""cvtmodels\CvT-w24-384x384-IN-22k.pth""",
type=str,
help="""Input Image Size""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
lowerCamelCase__ = parser.parse_args()
    convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 86 |
"""simple docstring"""
from collections import defaultdict
def lowercase__ ( snake_case_ :str , snake_case_ :str ):
__UpperCAmelCase = first_str.lower().strip()
__UpperCAmelCase = second_str.lower().strip()
# Remove whitespace
__UpperCAmelCase = first_str.replace(''' ''' , '''''' )
__UpperCAmelCase = second_str.replace(''' ''' , '''''' )
# Strings of different lengths are not anagrams
if len(snake_case_ ) != len(snake_case_ ):
return False
# Default values for count should be 0
__UpperCAmelCase = defaultdict(snake_case_ )
    # For each character in the input strings, increment the count for the
    # first string and decrement it for the second
for i in range(len(snake_case_ ) ):
count[first_str[i]] += 1
count[second_str[i]] -= 1
return all(_count == 0 for _count in count.values() )
if __name__ == "__main__":
from doctest import testmod
testmod()
_lowercase : List[Any] = input('Enter the first string ').strip()
_lowercase : Tuple = input('Enter the second string ').strip()
_lowercase : str = check_anagrams(input_a, input_b)
print(f"""{input_a} and {input_b} are {"" if status else "not "}anagrams.""")
| 332 | 0 |
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
lowerCamelCase_ : List[str] = TypeVar("""KT""")
lowerCamelCase_ : Optional[Any] = TypeVar("""VT""")
class a__ ( Generic[KT, VT] ):
def __init__( self , UpperCAmelCase = "root" , UpperCAmelCase = None ) -> Optional[Any]:
__a = key
__a = value
__a = []
def __repr__( self ) -> str:
return f'''Node({self.key}: {self.value})'''
@property
def __SCREAMING_SNAKE_CASE ( self ) -> int:
return len(self.forward )
class a__ ( Generic[KT, VT] ):
def __init__( self , UpperCAmelCase = 0.5 , UpperCAmelCase = 1_6 ) -> Any:
__a = Node[KT, VT]()
__a = 0
__a = p
__a = max_level
def __str__( self ) -> str:
__a = list(self )
if len(UpperCAmelCase ) == 0:
return f'''SkipList(level={self.level})'''
__a = max((len(str(UpperCAmelCase ) ) for item in items) , default=4 )
__a = max(UpperCAmelCase , 4 ) + 4
__a = self.head
__a = []
__a = node.forward.copy()
lines.append(f'''[{node.key}]'''.ljust(UpperCAmelCase , '-' ) + '* ' * len(UpperCAmelCase ) )
lines.append(' ' * label_size + '| ' * len(UpperCAmelCase ) )
while len(node.forward ) != 0:
__a = node.forward[0]
lines.append(
f'''[{node.key}]'''.ljust(UpperCAmelCase , '-' )
+ ' '.join(str(n.key ) if n.key == node.key else '|' for n in forwards ) )
lines.append(' ' * label_size + '| ' * len(UpperCAmelCase ) )
__a = node.forward
lines.append('None'.ljust(UpperCAmelCase ) + '* ' * len(UpperCAmelCase ) )
return f'''SkipList(level={self.level})\n''' + "\n".join(UpperCAmelCase )
def __iter__( self ) -> str:
__a = self.head
while len(node.forward ) != 0:
yield node.forward[0].key
__a = node.forward[0]
def __SCREAMING_SNAKE_CASE ( self ) -> int:
__a = 1
while random() < self.p and level < self.max_level:
level += 1
return level
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase ) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
__a = []
__a = self.head
for i in reversed(range(self.level ) ):
# i < node.level - When node level is lesser than `i` decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
__a = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(UpperCAmelCase )
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase ) -> Optional[int]:
__a , __a = self._locate_node(UpperCAmelCase )
if node is not None:
for i, update_node in enumerate(UpperCAmelCase ):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
__a = node.forward[i]
else:
__a = update_node.forward[:i]
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase ) -> List[Any]:
__a , __a = self._locate_node(UpperCAmelCase )
if node is not None:
__a = value
else:
__a = self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 , UpperCAmelCase ):
update_vector.append(self.head )
__a = level
__a = Node(UpperCAmelCase , UpperCAmelCase )
for i, update_node in enumerate(update_vector[:level] ):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i] )
if update_node.level < i + 1:
update_node.forward.append(UpperCAmelCase )
else:
__a = new_node
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase ) -> VT | None:
__a , __a = self._locate_node(UpperCAmelCase )
if node is not None:
return node.value
return None
def lowerCAmelCase( ):
__a = SkipList()
skip_list.insert('Key1' , 3 )
skip_list.insert('Key2' , 12 )
skip_list.insert('Key3' , 41 )
skip_list.insert('Key4' , -19 )
__a = skip_list.head
__a = {}
while node.level != 0:
__a = node.forward[0]
__a = node.value
assert len(__lowerCamelCase ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 12
assert all_values["Key3"] == 41
assert all_values["Key4"] == -19
def lowerCAmelCase( ):
__a = SkipList()
skip_list.insert('Key1' , 10 )
skip_list.insert('Key1' , 12 )
skip_list.insert('Key5' , 7 )
skip_list.insert('Key7' , 10 )
skip_list.insert('Key10' , 5 )
skip_list.insert('Key7' , 7 )
skip_list.insert('Key5' , 5 )
skip_list.insert('Key10' , 10 )
__a = skip_list.head
__a = {}
while node.level != 0:
__a = node.forward[0]
__a = node.value
if len(__lowerCamelCase ) != 4:
print()
assert len(__lowerCamelCase ) == 4
assert all_values["Key1"] == 12
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 10
def lowerCAmelCase( ):
__a = SkipList()
assert skip_list.find('Some key' ) is None
def lowerCAmelCase( ):
__a = SkipList()
skip_list.insert('Key2' , 20 )
assert skip_list.find('Key2' ) == 20
skip_list.insert('Some Key' , 10 )
skip_list.insert('Key2' , 8 )
skip_list.insert('V' , 13 )
assert skip_list.find('Y' ) is None
assert skip_list.find('Key2' ) == 8
assert skip_list.find('Some Key' ) == 10
assert skip_list.find('V' ) == 13
def lowerCAmelCase( ):
__a = SkipList()
skip_list.delete('Some key' )
assert len(skip_list.head.forward ) == 0
def lowerCAmelCase( ):
__a = SkipList()
skip_list.insert('Key1' , 12 )
skip_list.insert('V' , 13 )
skip_list.insert('X' , 14 )
skip_list.insert('Key2' , 15 )
skip_list.delete('V' )
skip_list.delete('Key2' )
assert skip_list.find('V' ) is None
assert skip_list.find('Key2' ) is None
def lowerCAmelCase( ):
__a = SkipList()
skip_list.insert('Key1' , 12 )
skip_list.insert('V' , 13 )
skip_list.insert('X' , 14 )
skip_list.insert('Key2' , 15 )
skip_list.delete('V' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) == 14
assert skip_list.find('Key1' ) == 12
assert skip_list.find('Key2' ) == 15
skip_list.delete('X' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) is None
assert skip_list.find('Key1' ) == 12
assert skip_list.find('Key2' ) == 15
skip_list.delete('Key1' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) is None
assert skip_list.find('Key1' ) is None
assert skip_list.find('Key2' ) == 15
skip_list.delete('Key2' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) is None
assert skip_list.find('Key1' ) is None
assert skip_list.find('Key2' ) is None
def lowerCAmelCase( ):
__a = SkipList()
skip_list.insert('Key1' , 12 )
skip_list.insert('V' , 13 )
skip_list.insert('X' , 142 )
skip_list.insert('Key2' , 15 )
skip_list.delete('X' )
def traverse_keys(__lowerCamelCase ):
yield node.key
for forward_node in node.forward:
yield from traverse_keys(__lowerCamelCase )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def lowerCAmelCase( ):
def is_sorted(__lowerCamelCase ):
return all(next_item >= item for item, next_item in zip(__lowerCamelCase , lst[1:] ) )
__a = SkipList()
for i in range(10 ):
skip_list.insert(__lowerCamelCase , __lowerCamelCase )
assert is_sorted(list(__lowerCamelCase ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
assert is_sorted(list(__lowerCamelCase ) )
skip_list.insert(-12 , -12 )
skip_list.insert(77 , 77 )
assert is_sorted(list(__lowerCamelCase ) )
def lowerCAmelCase( ):
for _ in range(100 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def lowerCAmelCase( ):
__a = SkipList()
skip_list.insert(2 , '2' )
skip_list.insert(4 , '4' )
skip_list.insert(6 , '4' )
skip_list.insert(4 , '5' )
skip_list.insert(8 , '4' )
skip_list.insert(9 , '4' )
skip_list.delete(4 )
print(__lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
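    # Quick usage sketch (illustrative keys): the structure behaves like a
    # sorted key/value mapping with expected O(log n) insert/find/delete.
    demo_list = SkipList()
    for key in (30, 10, 20):
        demo_list.insert(key, str(key))
    assert list(demo_list) == [10, 20, 30]  # iteration walks level-0 links in order
    assert demo_list.find(20) == '20'
    demo_list.delete(20)
    assert demo_list.find(20) is None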
| 197 |
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def lowerCAmelCase( __lowerCamelCase ):
return (data["data"], data["target"])
def lowerCAmelCase( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
__a = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(__lowerCamelCase , __lowerCamelCase )
# Predict target for test data
__a = xgb.predict(__lowerCamelCase )
__a = predictions.reshape(len(__lowerCamelCase ) , 1 )
return predictions
def lowerCAmelCase( ):
__a = fetch_california_housing()
__a , __a = data_handling(__lowerCamelCase )
__a , __a , __a , __a = train_test_split(
__lowerCamelCase , __lowerCamelCase , test_size=0.25 , random_state=1 )
__a = xgboost(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Error printing
print(f'''Mean Absolute Error : {mean_absolute_error(__lowerCamelCase , __lowerCamelCase )}''' )
print(f'''Mean Square Error : {mean_squared_error(__lowerCamelCase , __lowerCamelCase )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
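    # By-hand computation of the two metrics printed above, to make their
    # definitions explicit (toy arrays are illustrative):
    y_true = np.array([3.0, -0.5, 2.0, 7.0])
    y_pred = np.array([2.5, 0.0, 2.0, 8.0])
    print(f'''Hand-computed MAE : {np.mean(np.abs(y_true - y_pred))}''')  # 0.5
    print(f'''Hand-computed MSE : {np.mean((y_true - y_pred) ** 2)}''')  # 0.375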
| 197 | 1 |
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class _UpperCAmelCase ( lowercase_ ):
UpperCamelCase = (UnCLIPScheduler,)
def lowerCamelCase ( self :int , **__UpperCamelCase :Tuple ):
A = {
"num_train_timesteps": 10_00,
"variance_type": "fixed_small_log",
"clip_sample": True,
"clip_sample_range": 1.0,
"prediction_type": "epsilon",
}
config.update(**__UpperCamelCase )
return config
def lowerCamelCase ( self :Union[str, Any] ):
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=__UpperCamelCase )
def lowerCamelCase ( self :Dict ):
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=__UpperCamelCase )
def lowerCamelCase ( self :Optional[Any] ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__UpperCamelCase )
def lowerCamelCase ( self :Optional[Any] ):
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=__UpperCamelCase )
def lowerCamelCase ( self :List[Any] ):
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=__UpperCamelCase )
def lowerCamelCase ( self :List[Any] ):
for time_step in [0, 5_00, 9_99]:
for prev_timestep in [None, 5, 1_00, 2_50, 5_00, 7_50]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=__UpperCamelCase , prev_timestep=__UpperCamelCase )
def lowerCamelCase ( self :List[Any] ):
A = self.scheduler_classes[0]
A = self.get_scheduler_config(variance_type="fixed_small_log" )
A = scheduler_class(**__UpperCamelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0_000e-10 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.0_549_625 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.9_994_987 ) ) < 1e-5
def lowerCamelCase ( self :Optional[int] ):
A = self.scheduler_classes[0]
A = self.get_scheduler_config(variance_type="learned_range" )
A = scheduler_class(**__UpperCamelCase )
A = 0.5
assert scheduler._get_variance(1 , predicted_variance=__UpperCamelCase ) - -10.1_712_790 < 1e-5
assert scheduler._get_variance(4_87 , predicted_variance=__UpperCamelCase ) - -5.7_998_052 < 1e-5
assert scheduler._get_variance(9_99 , predicted_variance=__UpperCamelCase ) - -0.0_010_011 < 1e-5
def lowerCamelCase ( self :int ):
A = self.scheduler_classes[0]
A = self.get_scheduler_config()
A = scheduler_class(**__UpperCamelCase )
A = scheduler.timesteps
A = self.dummy_model()
A = self.dummy_sample_deter
A = torch.manual_seed(0 )
for i, t in enumerate(__UpperCamelCase ):
# 1. predict noise residual
A = model(__UpperCamelCase , __UpperCamelCase )
# 2. predict previous mean of sample x_t-1
A = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , generator=__UpperCamelCase ).prev_sample
A = pred_prev_sample
A = torch.sum(torch.abs(__UpperCamelCase ) )
A = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_sum.item() - 252.2_682_495 ) < 1e-2
assert abs(result_mean.item() - 0.3_284_743 ) < 1e-3
def lowerCamelCase ( self :List[Any] ):
A = self.scheduler_classes[0]
A = self.get_scheduler_config()
A = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(25 )
A = scheduler.timesteps
A = self.dummy_model()
A = self.dummy_sample_deter
A = torch.manual_seed(0 )
for i, t in enumerate(__UpperCamelCase ):
# 1. predict noise residual
A = model(__UpperCamelCase , __UpperCamelCase )
if i + 1 == timesteps.shape[0]:
A = None
else:
A = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
A = scheduler.step(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , prev_timestep=__UpperCamelCase , generator=__UpperCamelCase ).prev_sample
A = pred_prev_sample
A = torch.sum(torch.abs(__UpperCamelCase ) )
A = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_sum.item() - 258.2_044_983 ) < 1e-2
assert abs(result_mean.item() - 0.3_362_038 ) < 1e-3
def lowerCamelCase ( self :Any ):
pass
def lowerCamelCase ( self :Union[str, Any] ):
pass
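# Skeleton of the denoising loop the tests above exercise, mirroring their
# prev_timestep handling; the zero "residual" stands in for a trained UNet so
# the sketch runs without weights (shapes and step count are illustrative).
def _unclip_loop_sketch():
    scheduler = UnCLIPScheduler(num_train_timesteps=1000, variance_type="fixed_small_log")
    scheduler.set_timesteps(25)
    timesteps = scheduler.timesteps
    sample = torch.randn(1, 3, 8, 8)
    generator = torch.manual_seed(0)
    for i, t in enumerate(timesteps):
        prev_t = None if i + 1 == timesteps.shape[0] else timesteps[i + 1]
        residual = torch.zeros_like(sample)  # stand-in for model(sample, t)
        sample = scheduler.step(
            residual, t, sample, prev_timestep=prev_t, generator=generator
        ).prev_sample
    return sample.shape  # torch.Size([1, 3, 8, 8])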
| 292 |
import argparse
import os
import re


PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":` and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line):
    """Returns the indent in `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into its indented blocks, starting at `indent_level`."""
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks


def ignore_underscore(key):
    """Wraps a `key` function so that the resulting sort ignores case and underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sort a list of `objects` following the rules of isort; `key` optionally maps an object to a string."""

    # If no key is provided, we use the identity.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
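

# Demo of the resulting isort-style order (object names invented): constants
# first, then classes, then functions, each group sorted case-insensitively.
print(sort_objects(["load_model", "AutoModel", "CONFIG_NAME", "Trainer", "pipeline", "WEIGHTS_NAME"]))
# -> ['CONFIG_NAME', 'WEIGHTS_NAME', 'AutoModel', 'Trainer', 'load_model', 'pipeline']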


def sort_objects_in_import(import_statement):
    """Return the same `import_statement` but with the objects inside it properly sorted."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
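

# Demo on a single-line entry (made-up module and object names): only the
# bracketed list is reordered, the rest of the line is preserved.
print(sort_objects_in_import('    "models.bert": ["helper_fn", "BertModel", "BERT_CONSTANT"],'))
# ->     "models.bert": ["BERT_CONSTANT", "BertModel", "helper_fn"],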


def sort_imports(file, check_only=True):
    """Sort the `_import_structure` of a given init `file`; only reports a problem if `check_only=True`."""
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    """Sort the `_import_structure` of every `__init__.py` under `PATH_TO_TRANSFORMERS`."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]

    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
| 205 | 0 |
import argparse
import os

import jax
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints

from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder


MODEL = "base_with_context"


def load_notes_encoder(weights, model):
    # Copy the T5X weights into the torch module attributes of the notes encoder.
    model.token_embedder.weight = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"]))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))

        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))

    return model


def load_continuous_encoder(weights, model):
    # Copy the T5X weights into the torch module attributes of the continuous encoder.
    model.input_proj.weight = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )

    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["attention"]

        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))

    return model


def load_decoder(weights, model):
    # Copy the T5X weights into the torch module attributes of the FiLM decoder.
    model.conditioning_emb[0].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T))
    model.conditioning_emb[2].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T))

    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )

    model.continuous_inputs_projection.weight = nn.Parameter(
        torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T)
    )

    for lyr_num, lyr in enumerate(model.decoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"])
        )

        lyr.layer[0].FiLMLayer.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T)
        )

        attention_weights = ly_weight["self_attention"]
        lyr.layer[0].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
        lyr.layer[1].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[1].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[1].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[1].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        lyr.layer[1].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"])
        )

        lyr.layer[2].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[2].film.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T)
        )

        lyr.layer[2].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.decoder_norm.weight = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"]))

    model.spec_out.weight = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T))

    return model


def main(args):
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    t5_checkpoint = jax.tree_util.tree_map(onp.array, t5_checkpoint)

    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]

    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"],
        vocab_size=synth_model.model.module.config.vocab_size,
        d_model=synth_model.model.module.config.emb_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )

    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims,
        targets_context_length=synth_model.sequence_length["targets_context"],
        d_model=synth_model.model.module.config.emb_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )

    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims,
        targets_length=synth_model.sequence_length["targets_context"],
        max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time,
        d_model=synth_model.model.module.config.emb_dim,
        num_layers=synth_model.model.module.config.num_decoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
    )

    notes_encoder = load_notes_encoder(t5_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(t5_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(t5_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder,
        continuous_encoder=continuous_encoder,
        decoder=decoder,
        scheduler=scheduler,
        melgan=melgan,
    )
    if args.save:
        pipe.save_pretrained(args.output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument(
        "--checkpoint_path",
        default=f"{MODEL}/checkpoint_500000",
        type=str,
        required=False,
        help="Path to the original jax model checkpoint.",
    )
    args = parser.parse_args()

    main(args)
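
# Once converted, the pipeline reloads like any other diffusers pipeline.
# Minimal sketch; "./spectrogram_diffusion" is a placeholder for whatever was
# passed as --output_path above.
from diffusers import SpectrogramDiffusionPipeline

pipe = SpectrogramDiffusionPipeline.from_pretrained("./spectrogram_diffusion")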
| 352 |
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset

    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever

    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever

    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever

    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
                self.assertIsInstance(retriever, RagRetriever)
                hidden_states = np.array(
                    [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
                )
                out = retriever.retrieve(hidden_states, n_docs=1)
                self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_legacy_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)

        out = retriever(
            question_input_ids,
            hidden_states,
            prefix=retriever.config.generator.prefix,
            n_docs=n_docs,
            return_tensors="pt",
        )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_custom_hf_index_end2end_retriever_call(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)

        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)

        self.assertEqual(
            len(out), 6
        )  # check whether the retriever output consists of 6 attributes including tokenized docs

        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True
        )  # check for doc token related keys in dictionary.
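

# Outside of tests, a retriever is typically loaded from a pretrained RAG
# checkpoint rather than built by hand; the dummy wiki index keeps the
# download small (this mirrors the example in the transformers documentation):
demo_retriever = RagRetriever.from_pretrained(
    "facebook/rag-token-base", index_name="exact", use_dummy_dataset=True
)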
| 67 | 0 |