code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure for the DistilBERT subpackage: each key is a submodule
# name, each value the list of public names it exports.  Backend-specific
# entries are only registered when the backend is importable, so importing
# `transformers` stays cheap without torch/tf/flax installed.
_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Fast (Rust-backed) tokenizer needs the `tokenizers` package.
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type-checkers see the real imports, mirroring _import_structure.
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )

else:
    import sys

    # BUG FIX: the obfuscated version assigned every list to the same name and
    # passed an undefined `_import_structure` to _LazyModule without installing
    # the proxy.  Replace this module with the lazy proxy so submodules are
    # only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 20 |
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def try_infer_format_from_ext(path: str):
    """Infer the pipeline data format from a file extension.

    Returns "pipe" for an empty path (stream over stdin/stdout), otherwise the
    matching supported extension.  Raises when the extension is unknown.
    """
    if not path:
        return "pipe"

    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        # BUG FIX: the obfuscated original tested `path.endswith(path)`, which
        # is always true and returned the first supported format regardless of
        # the actual file extension.
        if path.endswith(ext):
            return ext

    raise Exception(
        f"Unable to determine file format from file extension {path}. "
        f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}"
    )


def run_command_factory(args):
    """Build a RunCommand from parsed CLI args.

    Instantiates the pipeline and a PipelineDataFormat reader matching the
    requested (or extension-inferred) input format.
    """
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
    reader = PipelineDataFormat.from_str(
        format=format,
        output_path=args.output,
        input_path=args.input,
        column=args.column if args.column else nlp.default_input_names,
        overwrite=args.overwrite,
    )
    return RunCommand(nlp, reader)


class RunCommand(BaseTransformersCLICommand):
    """CLI command that runs a pipeline over a data source and saves results."""

    def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
        self._nlp = nlp
        self._reader = reader

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Attach the `run` sub-parser and its arguments to *parser*."""
        run_parser = parser.add_parser("run", help="Run a pipeline through the CLI")
        run_parser.add_argument("--task", choices=get_supported_tasks(), help="Task to run")
        run_parser.add_argument("--input", type=str, help="Path to the file to use for inference")
        run_parser.add_argument("--output", type=str, help="Path to the file that will be used post to write results.")
        run_parser.add_argument("--model", type=str, help="Name or path to the model to instantiate.")
        run_parser.add_argument("--config", type=str, help="Name or path to the model's config to instantiate.")
        run_parser.add_argument(
            "--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)"
        )
        run_parser.add_argument(
            "--column",
            type=str,
            help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)",
        )
        run_parser.add_argument(
            "--format",
            type=str,
            default="infer",
            choices=PipelineDataFormat.SUPPORTED_FORMATS,
            help="Input format to read from",
        )
        run_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.")
        run_parser.set_defaults(func=run_command_factory)

    def run(self):
        """Run the pipeline over every entry yielded by the reader and save."""
        nlp, outputs = self._nlp, []
        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output

        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}")
        else:
            self._reader.save(outputs)
| 477 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
# Map from checkpoint name to its hosted config URL.  BUG FIX: the obfuscated
# file assigned this dict to the same name as the module logger, clobbering it.
MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    """Configuration class for a MobileNetV1 model.

    Stores the hyper-parameters used to instantiate the model; defaults match
    the google/mobilenet_v1_1.0_224 checkpoint.  NOTE(review): the obfuscated
    original gave every __init__ parameter the same name `a` (a SyntaxError);
    names below are reconstructed from the attribute assignments in the body.
    """

    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetV1OnnxConfig(OnnxConfig):
    """ONNX export configuration for MobileNetV1.

    NOTE(review): the obfuscated original named all three properties
    `_snake_case`, so only the last survived in the class namespace; the
    canonical OnnxConfig property names are restored here.
    """

    # Minimum torch version supporting this export.
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Only the batch axis is dynamic.
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance when validating exported vs. reference outputs.
        return 1e-4
| 704 |
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# File name expected inside a saved vocabulary directory.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

# Maximum model input length per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    """Character-level tokenizer for MGP-STR.

    Tokenization simply splits text into individual characters and maps them
    through a JSON vocabulary.  NOTE(review): the obfuscated original gave all
    methods the name `_snake_case` (each def shadowed the previous one) and
    used duplicate `a` parameter names; the canonical PreTrainedTokenizer
    method names are restored here.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        # Reverse mapping id -> token for decoding.
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        # One token per character.
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        # Unknown characters fall back to the unk token's id.
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write the vocabulary as JSON into *save_directory*; returns the path."""
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
| 230 | 0 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# BUG FIX: the path was assigned to an obfuscated name while the insert below
# read `git_repo_path`, raising NameError at collection time.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_addoption(parser):
    """pytest hook: register the shared diffusers CLI options on *parser*.

    The hook must carry this exact name for pytest to discover it; the
    obfuscated original shared a name with the next hook and was shadowed.
    """
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    """pytest hook: emit the extended report files when --make-reports is set.

    BUG FIX: the obfuscated original read `terminalreporter` and
    `make_reports` while binding neither name (param and local were mangled).
    """
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 84 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNetaDOutput(BaseOutput):
    """Output of the 1D UNet forward pass.

    BUG FIX: the obfuscated original replaced the dataclass field with
    `__magic_name__ = 42`, so constructing `UNetaDOutput(sample=...)` (as the
    model's forward does) raised TypeError.
    """

    # Denoised sample; 3-D (batch, channels, length) — the model's forward
    # indexes sample.shape[2].
    sample: torch.FloatTensor
class UNetaDModel(ModelMixin, ConfigMixin):
    """1D UNet that denoises a (batch, channels, length) sample given a timestep.

    NOTE(review): the obfuscated original used the same name for every
    constructor parameter (a SyntaxError) and renamed `forward`; parameter
    names below are reconstructed from how each value is used in the body.
    """

    @register_to_config
    def __init__(
        self,
        sample_size=65536,
        sample_rate=None,  # stored in config only; not read in this body
        in_channels=2,
        out_channels=2,
        extra_in_channels=0,
        time_embedding_type="fourier",
        flip_sin_to_cos=True,
        use_timestep_embedding=False,
        freq_shift=0.0,
        down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type="UNetMidBlock1D",
        out_block_type=None,
        block_out_channels=(32, 32, 64),
        act_fn=None,
        norm_num_groups=8,
        layers_per_block=1,
        downsample_each_block=False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim,
                time_embed_dim=time_embed_dim,
                act_fn=act_fn,
                out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]

            # The first block also receives the extra conditioning channels.
            if i == 0:
                input_channel += extra_in_channels

            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type,
            in_channels=block_out_channels[-1],
            mid_channels=block_out_channels[-1],
            out_channels=block_out_channels[-1],
            embed_dim=block_out_channels[0],
            num_layers=layers_per_block,
            add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )

            is_final_block = i == len(up_block_types) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type,
            num_groups_out=num_groups_out,
            embed_dim=block_out_channels[0],
            out_channels=out_channels,
            act_fn=act_fn,
            fc_dim=block_out_channels[-1] // 4,
        )

    def forward(self, sample, timestep, return_dict=True):
        """Denoise *sample* conditioned on *timestep*.

        Returns a UNetaDOutput, or a 1-tuple when return_dict is False.
        """
        # 1. time: accept python scalars and 0-d tensors.
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            # Broadcast the raw projection across the sample's length axis.
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up: consume the residuals in reverse order.
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNetaDOutput(sample=sample)
| 444 | 0 |
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class lowercase ( unittest.TestCase ):
    """Pipeline tests for text-generation (causal LM) models.

    NOTE(review): an obfuscation pass mangled every method name to ``A__`` and
    every local to ``lowercase``; later ``A__`` definitions shadow earlier
    ones in the class namespace, locals are clobbered, and names such as
    ``text_generator`` / ``EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS`` are read
    without ever being bound.  The original test-method names must be
    restored before these tests can run; code is kept verbatim here and only
    documented.
    """

    # Model mappings used by the common pipeline test harness.
    lowercase_ : int =MODEL_FOR_CAUSAL_LM_MAPPING
    lowercase_ : Any =TF_MODEL_FOR_CAUSAL_LM_MAPPING

    # presumably test_small_model_pt — TODO confirm original name
    @require_torch
    def A__ ( self):
        lowercase = pipeline(task='''text-generation''' ,model='''sshleifer/tiny-ctrl''' ,framework='''pt''')
        # Using `do_sample=False` to force deterministic output
        lowercase = text_generator('''This is a test''' ,do_sample=A__)
        self.assertEqual(
            A__ ,[
                {
                    '''generated_text''': (
                        '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
                        ''' oscope. FiliFili@@'''
                    )
                }
            ] ,)
        lowercase = text_generator(['''This is a test''', '''This is a second test'''])
        self.assertEqual(
            A__ ,[
                [
                    {
                        '''generated_text''': (
                            '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
                            ''' oscope. FiliFili@@'''
                        )
                    }
                ],
                [
                    {
                        '''generated_text''': (
                            '''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'''
                            ''' oscope. oscope. FiliFili@@'''
                        )
                    }
                ],
            ] ,)
        lowercase = text_generator('''This is a test''' ,do_sample=A__ ,num_return_sequences=2 ,return_tensors=A__)
        self.assertEqual(
            A__ ,[
                {'''generated_token_ids''': ANY(A__)},
                {'''generated_token_ids''': ANY(A__)},
            ] ,)
        lowercase = text_generator.model.config.eos_token_id
        lowercase = '''<pad>'''
        lowercase = text_generator(
            ['''This is a test''', '''This is a second test'''] ,do_sample=A__ ,num_return_sequences=2 ,batch_size=2 ,return_tensors=A__ ,)
        self.assertEqual(
            A__ ,[
                [
                    {'''generated_token_ids''': ANY(A__)},
                    {'''generated_token_ids''': ANY(A__)},
                ],
                [
                    {'''generated_token_ids''': ANY(A__)},
                    {'''generated_token_ids''': ANY(A__)},
                ],
            ] ,)

    # presumably test_small_model_tf — TODO confirm original name
    @require_tf
    def A__ ( self):
        lowercase = pipeline(task='''text-generation''' ,model='''sshleifer/tiny-ctrl''' ,framework='''tf''')
        # Using `do_sample=False` to force deterministic output
        lowercase = text_generator('''This is a test''' ,do_sample=A__)
        self.assertEqual(
            A__ ,[
                {
                    '''generated_text''': (
                        '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
                        ''' please,'''
                    )
                }
            ] ,)
        lowercase = text_generator(['''This is a test''', '''This is a second test'''] ,do_sample=A__)
        self.assertEqual(
            A__ ,[
                [
                    {
                        '''generated_text''': (
                            '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
                            ''' please,'''
                        )
                    }
                ],
                [
                    {
                        '''generated_text''': (
                            '''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'''
                            ''' Cannes 閲閲Cannes Cannes Cannes 攵 please,'''
                        )
                    }
                ],
            ] ,)

    # presumably get_test_pipeline(model, tokenizer, processor) used by the
    # common harness — TODO confirm
    def A__ ( self ,A__ ,A__ ,A__):
        lowercase = TextGenerationPipeline(model=A__ ,tokenizer=A__)
        return text_generator, ["This is a test", "Another test"]

    # presumably test_stop_sequence_stopping_criteria — TODO confirm
    def A__ ( self):
        lowercase = '''Hello I believe in'''
        lowercase = pipeline('''text-generation''' ,model='''hf-internal-testing/tiny-random-gpt2''')
        lowercase = text_generator(A__)
        self.assertEqual(
            A__ ,[{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] ,)
        lowercase = text_generator(A__ ,stop_sequence=''' fe''')
        self.assertEqual(A__ ,[{'''generated_text''': '''Hello I believe in fe'''}])

    # presumably run_pipeline_test(text_generator, _) from the common
    # harness — exercises return_full_text/return_text/return_tensors
    # combinations and long-input handling — TODO confirm
    def A__ ( self ,A__ ,A__):
        lowercase = text_generator.model
        lowercase = text_generator.tokenizer
        lowercase = text_generator('''This is a test''')
        self.assertEqual(A__ ,[{'''generated_text''': ANY(A__)}])
        self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test'''))
        lowercase = text_generator('''This is a test''' ,return_full_text=A__)
        self.assertEqual(A__ ,[{'''generated_text''': ANY(A__)}])
        self.assertNotIn('''This is a test''' ,outputs[0]['''generated_text'''])
        lowercase = pipeline(task='''text-generation''' ,model=A__ ,tokenizer=A__ ,return_full_text=A__)
        lowercase = text_generator('''This is a test''')
        self.assertEqual(A__ ,[{'''generated_text''': ANY(A__)}])
        self.assertNotIn('''This is a test''' ,outputs[0]['''generated_text'''])
        lowercase = text_generator('''This is a test''' ,return_full_text=A__)
        self.assertEqual(A__ ,[{'''generated_text''': ANY(A__)}])
        self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test'''))
        lowercase = text_generator(['''This is great !''', '''Something else'''] ,num_return_sequences=2 ,do_sample=A__)
        self.assertEqual(
            A__ ,[
                [{'''generated_text''': ANY(A__)}, {'''generated_text''': ANY(A__)}],
                [{'''generated_text''': ANY(A__)}, {'''generated_text''': ANY(A__)}],
            ] ,)
        if text_generator.tokenizer.pad_token is not None:
            lowercase = text_generator(
                ['''This is great !''', '''Something else'''] ,num_return_sequences=2 ,batch_size=2 ,do_sample=A__)
            self.assertEqual(
                A__ ,[
                    [{'''generated_text''': ANY(A__)}, {'''generated_text''': ANY(A__)}],
                    [{'''generated_text''': ANY(A__)}, {'''generated_text''': ANY(A__)}],
                ] ,)
        # Mutually exclusive output options must raise.
        with self.assertRaises(A__):
            lowercase = text_generator('''test''' ,return_full_text=A__ ,return_text=A__)
        with self.assertRaises(A__):
            lowercase = text_generator('''test''' ,return_full_text=A__ ,return_tensors=A__)
        with self.assertRaises(A__):
            lowercase = text_generator('''test''' ,return_text=A__ ,return_tensors=A__)
        # Empty prompt is slighly special
        # it requires BOS token to exist.
        # Special case for Pegasus which will always append EOS so will
        # work even without BOS.
        if (
            text_generator.tokenizer.bos_token_id is not None
            or "Pegasus" in tokenizer.__class__.__name__
            or "Git" in model.__class__.__name__
        ):
            lowercase = text_generator('''''')
            self.assertEqual(A__ ,[{'''generated_text''': ANY(A__)}])
        else:
            with self.assertRaises((ValueError, AssertionError)):
                lowercase = text_generator('''''')
        if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, dismissing tests for now.
            return
        # We don't care about infinite range models.
        # They already work.
        # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        lowercase = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM''']
        if (
            tokenizer.model_max_length < 1_0_0_0_0
            and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
        ):
            # Handling of large generations
            with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)):
                text_generator('''This is a test''' * 5_0_0 ,max_new_tokens=2_0)
            lowercase = text_generator('''This is a test''' * 5_0_0 ,handle_long_generation='''hole''' ,max_new_tokens=2_0)
            # Hole strategy cannot work
            with self.assertRaises(A__):
                text_generator(
                    '''This is a test''' * 5_0_0 ,handle_long_generation='''hole''' ,max_new_tokens=tokenizer.model_max_length + 1_0 ,)

    # presumably test_pipeline_accelerate — device_map/torch_dtype loading
    @require_torch
    @require_accelerate
    @require_torch_gpu
    def A__ ( self):
        import torch
        # Classic `model_kwargs`
        lowercase = pipeline(
            model='''hf-internal-testing/tiny-random-bloom''' ,model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} ,)
        self.assertEqual(pipe.model.device ,torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype ,torch.bfloataa)
        lowercase = pipe('''This is a test''')
        self.assertEqual(
            A__ ,[
                {
                    '''generated_text''': (
                        '''This is a test test test test test test test test test test test test test test test test'''
                        ''' test'''
                    )
                }
            ] ,)
        # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        lowercase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' ,device_map='''auto''' ,torch_dtype=torch.bfloataa)
        self.assertEqual(pipe.model.device ,torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype ,torch.bfloataa)
        lowercase = pipe('''This is a test''')
        self.assertEqual(
            A__ ,[
                {
                    '''generated_text''': (
                        '''This is a test test test test test test test test test test test test test test test test'''
                        ''' test'''
                    )
                }
            ] ,)
        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        lowercase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' ,device_map='''auto''')
        self.assertEqual(pipe.model.device ,torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype ,torch.floataa)
        lowercase = pipe('''This is a test''')
        self.assertEqual(
            A__ ,[
                {
                    '''generated_text''': (
                        '''This is a test test test test test test test test test test test test test test test test'''
                        ''' test'''
                    )
                }
            ] ,)

    # presumably fp16 smoke test on GPU
    @require_torch
    @require_torch_gpu
    def A__ ( self):
        import torch
        lowercase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' ,device=0 ,torch_dtype=torch.floataa)
        pipe('''This is a test''')

    # presumably accelerate + sampling smoke test
    @require_torch
    @require_accelerate
    @require_torch_gpu
    def A__ ( self):
        import torch
        lowercase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' ,device_map='''auto''' ,torch_dtype=torch.floataa)
        pipe('''This is a test''' ,do_sample=A__ ,top_p=0.5)

    # presumably test warning emitted only when both max_length and
    # max_new_tokens are set — TODO confirm
    def A__ ( self):
        lowercase = '''Hello world'''
        lowercase = pipeline('''text-generation''' ,model='''hf-internal-testing/tiny-random-gpt2''')
        if text_generator.model.framework == "tf":
            lowercase = logging.get_logger('''transformers.generation.tf_utils''')
        else:
            lowercase = logging.get_logger('''transformers.generation.utils''')
        lowercase = '''Both `max_new_tokens`''' # The beggining of the message to be checked in this test
        # Both are set by the user -> log warning
        with CaptureLogger(A__) as cl:
            lowercase = text_generator(A__ ,max_length=1_0 ,max_new_tokens=1)
        self.assertIn(A__ ,cl.out)
        # The user only sets one -> no warning
        with CaptureLogger(A__) as cl:
            lowercase = text_generator(A__ ,max_new_tokens=1)
        self.assertNotIn(A__ ,cl.out)
        with CaptureLogger(A__) as cl:
            lowercase = text_generator(A__ ,max_length=1_0)
        self.assertNotIn(A__ ,cl.out)
| 633 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
lowercase__ :Optional[Any] = "0.12" # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Create a random int tensor of *shape* with values in [0, vocab_size).

    BUG FIX: the obfuscated original assigned `total_dims`, `values` and the
    result to the same throwaway name while reading the real names, and gave
    both helpers in this pair the same function name (the second shadowed the
    first).
    """
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)
    return output


def random_attention_mask(shape, rng=None):
    """Random 0/1 attention mask of *shape*.

    The last position of every row is forced to 1 so no row is fully masked.
    """
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class lowercase :
lowercase_ : Any =None
lowercase_ : List[str] =()
def A__ ( self):
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
lowercase = 2
lowercase = inputs['''input_ids'''].shape[-1] // 2
lowercase = inputs['''input_ids'''][:max_batch_size, :sequence_length]
lowercase = jnp.ones_like(A__)
lowercase = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
lowercase = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
lowercase = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def A__ ( self):
lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
lowercase = False
lowercase = max_length
lowercase = 0
for model_class in self.all_generative_model_classes:
lowercase = model_class(A__)
lowercase = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowercase = getattr(A__ ,A__)
lowercase = pt_model_class(A__).eval()
lowercase = load_flax_weights_in_pytorch_model(A__ ,flax_model.params)
lowercase = flax_model.generate(A__).sequences
lowercase = pt_model.generate(torch.tensor(A__ ,dtype=torch.long))
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
lowercase = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() ,flax_generation_outputs.tolist())
def A__ ( self):
lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
lowercase = False
lowercase = max_length
for model_class in self.all_generative_model_classes:
lowercase = model_class(A__)
lowercase = model.generate(A__).sequences
self.assertEqual(generation_outputs.shape[-1] ,A__)
lowercase = jit(model.generate)
lowercase = jit_generate(A__).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
def A__ ( self):
lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
lowercase = True
lowercase = max_length
for model_class in self.all_generative_model_classes:
lowercase = model_class(A__)
lowercase = model.generate(A__).sequences
self.assertEqual(generation_outputs.shape[-1] ,A__)
lowercase = jit(model.generate)
lowercase = jit_generate(A__).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
def A__ ( self):
    """Beam search (2 beams): output reaches `max_length` and the jitted
    `generate` matches the eager one.

    NOTE(review): locals restored from the mangled `lowercase`/`A__` copy.
    """
    config , input_ids , attention_mask , max_length = self._get_input_ids_and_config()
    config.do_sample = False
    config.max_length = max_length
    config.num_beams = 2
    for model_class in self.all_generative_model_classes:
        model = model_class(config)
        generation_outputs = model.generate(input_ids).sequences
        self.assertEqual(generation_outputs.shape[-1] , max_length)
        jit_generate = jit(model.generate)
        jit_generation_outputs = jit_generate(input_ids).sequences
        self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def A__ ( self):
    """`num_return_sequences`: beam search with 2 return sequences must yield
    `batch_size * num_return_sequences` output rows.

    NOTE(review): locals restored from the mangled `lowercase`/`A__` copy.
    """
    config , input_ids , attention_mask , max_length = self._get_input_ids_and_config()
    config.do_sample = False
    config.max_length = max_length
    config.num_beams = 2
    config.num_return_sequences = 2
    for model_class in self.all_generative_model_classes:
        model = model_class(config)
        generation_outputs = model.generate(input_ids).sequences
        self.assertEqual(generation_outputs.shape[0] ,input_ids.shape[0] * config.num_return_sequences)
def A__ ( self):
    """Sampling with logits warpers (temperature/top-k/top-p) plus min-length
    and forced BOS/EOS tokens: jitted and eager `generate` must agree.

    NOTE(review): locals restored from the mangled `lowercase`/`A__` copy.
    """
    config , input_ids , attention_mask , max_length = self._get_input_ids_and_config()
    config.do_sample = True
    config.max_length = max_length
    config.temperature = 0.8
    config.top_k = 1_0
    config.top_p = 0.3
    config.min_length = 1
    config.forced_bos_token_id = 8
    config.forced_eos_token_id = 9
    for model_class in self.all_generative_model_classes:
        model = model_class(config)
        generation_outputs = model.generate(input_ids).sequences
        self.assertEqual(generation_outputs.shape[-1] , max_length)
        jit_generate = jit(model.generate)
        jit_generation_outputs = jit_generate(input_ids).sequences
        self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def A__ ( self):
    """Greedy decoding with logits processors (min-length, forced BOS/EOS):
    jitted and eager `generate` must agree.

    NOTE(review): locals restored from the mangled `lowercase`/`A__` copy.
    """
    config , input_ids , attention_mask , max_length = self._get_input_ids_and_config()
    config.max_length = max_length
    config.min_length = 1
    config.forced_bos_token_id = 8
    config.forced_eos_token_id = 9
    for model_class in self.all_generative_model_classes:
        model = model_class(config)
        generation_outputs = model.generate(input_ids).sequences
        self.assertEqual(generation_outputs.shape[-1] , max_length)
        jit_generate = jit(model.generate)
        jit_generation_outputs = jit_generate(input_ids).sequences
        self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def A__ ( self):
    """Beam search with logits processors (min-length, forced BOS/EOS):
    jitted and eager `generate` must agree.

    NOTE(review): locals restored from the mangled `lowercase`/`A__` copy.
    """
    config , input_ids , attention_mask , max_length = self._get_input_ids_and_config()
    config.max_length = max_length
    config.num_beams = 2
    config.min_length = 1
    config.forced_bos_token_id = 8
    config.forced_eos_token_id = 9
    for model_class in self.all_generative_model_classes:
        model = model_class(config)
        generation_outputs = model.generate(input_ids).sequences
        self.assertEqual(generation_outputs.shape[-1] , max_length)
        jit_generate = jit(model.generate)
        jit_generation_outputs = jit_generate(input_ids).sequences
        self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def A__ ( self):
    """Greedy decoding with a left-padded attention mask: jitted and eager
    `generate` must agree.

    NOTE(review): locals restored from the mangled `lowercase`/`A__` copy.
    """
    config , input_ids , attention_mask , max_length = self._get_input_ids_and_config()
    # pad attention mask on the left
    attention_mask = attention_mask.at[(0, 0)].set(0)
    config.do_sample = False
    config.max_length = max_length
    for model_class in self.all_generative_model_classes:
        model = model_class(config)
        generation_outputs = model.generate(input_ids ,attention_mask=attention_mask).sequences
        self.assertEqual(generation_outputs.shape[-1] , max_length)
        jit_generate = jit(model.generate)
        jit_generation_outputs = jit_generate(input_ids ,attention_mask=attention_mask).sequences
        self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def A__ ( self):
    """Sampling with a left-padded attention mask: jitted and eager
    `generate` must agree.

    NOTE(review): locals restored from the mangled `lowercase`/`A__` copy.
    """
    config , input_ids , attention_mask , max_length = self._get_input_ids_and_config()
    # pad attention mask on the left
    attention_mask = attention_mask.at[(0, 0)].set(0)
    config.do_sample = True
    config.max_length = max_length
    for model_class in self.all_generative_model_classes:
        model = model_class(config)
        generation_outputs = model.generate(input_ids ,attention_mask=attention_mask).sequences
        self.assertEqual(generation_outputs.shape[-1] , max_length)
        jit_generate = jit(model.generate)
        jit_generation_outputs = jit_generate(input_ids ,attention_mask=attention_mask).sequences
        self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def A__ ( self):
    """Beam search with a left-padded attention mask: jitted and eager
    `generate` must agree.

    NOTE(review): locals restored from the mangled `lowercase`/`A__` copy.
    """
    config , input_ids , attention_mask , max_length = self._get_input_ids_and_config()
    # pad attention mask on the left
    attention_mask = attention_mask.at[(0, 0)].set(0)
    config.num_beams = 2
    config.max_length = max_length
    for model_class in self.all_generative_model_classes:
        model = model_class(config)
        generation_outputs = model.generate(input_ids ,attention_mask=attention_mask).sequences
        self.assertEqual(generation_outputs.shape[-1] , max_length)
        jit_generate = jit(model.generate)
        jit_generation_outputs = jit_generate(input_ids ,attention_mask=attention_mask).sequences
        self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
@require_flax
class lowercase ( unittest.TestCase ):
    def A__ ( self):
        """`generate` must raise a ValueError naming the offending kwarg, both
        for a near-miss typo (`do_samples`) and for an arbitrary unknown kwarg.

        NOTE(review): the mangled copy passed the undefined name ``A__`` as
        the exception class and kwarg values; restored to the upstream test
        (ValueError / True).
        """
        tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-bert''')
        model = FlaxAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''')
        encoder_input_str = '''Hello world'''
        input_ids = tokenizer(encoder_input_str ,return_tensors='''np''').input_ids
        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError ,'''do_samples'''):
            model.generate(input_ids ,do_samples=True)
        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError ,'''foo'''):
            fake_model_kwargs = {'''foo''': '''bar'''}
            model.generate(input_ids ,**fake_model_kwargs)
| 633 | 1 |
'''simple docstring'''
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
# Test helper that assembles a tiny DeiT configuration together with random
# pixel inputs / labels for the unit tests further down this file.
# NOTE(review): this file was machine-mangled.  Every parameter below is
# literally named __SCREAMING_SNAKE_CASE (duplicate argument names are a
# SyntaxError in Python) and every local was renamed __lowerCAmelCase, so
# names such as `parent`, `config`, `model` and `result` are undefined at
# runtime.  The assignment order in __init__ reveals the intended
# parameters: parent, batch_size=13, image_size=30, patch_size=2,
# num_channels=3, is_training=True, use_labels=True, hidden_size=32,
# num_hidden_layers=5, num_attention_heads=4, intermediate_size=37,
# hidden_act="gelu", hidden_dropout_prob=0.1,
# attention_probs_dropout_prob=0.1, type_sequence_label_size=10,
# initializer_range=0.02, (num_labels=3 -- accepted but unused,) scope=None,
# encoder_stride=2.  Restore those names before running.
class _UpperCAmelCase :
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=13,__SCREAMING_SNAKE_CASE=30,__SCREAMING_SNAKE_CASE=2,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=True,__SCREAMING_SNAKE_CASE=True,__SCREAMING_SNAKE_CASE=32,__SCREAMING_SNAKE_CASE=5,__SCREAMING_SNAKE_CASE=4,__SCREAMING_SNAKE_CASE=37,__SCREAMING_SNAKE_CASE="gelu",__SCREAMING_SNAKE_CASE=0.1,__SCREAMING_SNAKE_CASE=0.1,__SCREAMING_SNAKE_CASE=10,__SCREAMING_SNAKE_CASE=0.02,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=2,):
'''Store the test hyper-parameters and derive the expected sequence length.'''
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = image_size
__lowerCAmelCase = patch_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = is_training
__lowerCAmelCase = use_labels
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = type_sequence_label_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = scope
__lowerCAmelCase = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
__lowerCAmelCase = (image_size // patch_size) ** 2
__lowerCAmelCase = num_patches + 2
def lowerCamelCase__ ( self ):
'''Return (config, pixel_values, labels); labels is None unless use_labels.'''
__lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size],self.type_sequence_label_size )
__lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def lowerCamelCase__ ( self ):
'''Build a small DeiTConfig from the stored hyper-parameters.'''
return DeiTConfig(
image_size=self.image_size,patch_size=self.patch_size,num_channels=self.num_channels,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,is_decoder=__SCREAMING_SNAKE_CASE,initializer_range=self.initializer_range,encoder_stride=self.encoder_stride,)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''Run the bare DeiTModel and check the last_hidden_state shape.'''
__lowerCAmelCase = DeiTModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''Check DeiTForMaskedImageModeling reconstruction shapes (RGB and greyscale).'''
__lowerCAmelCase = DeiTForMaskedImageModeling(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.reconstruction.shape,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__lowerCAmelCase = 1
__lowerCAmelCase = DeiTForMaskedImageModeling(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.reconstruction.shape,(self.batch_size, 1, self.image_size, self.image_size) )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''Check DeiTForImageClassification logits shapes (RGB and greyscale).'''
__lowerCAmelCase = self.type_sequence_label_size
__lowerCAmelCase = DeiTForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE,labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__lowerCAmelCase = 1
__lowerCAmelCase = DeiTForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE,labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase__ ( self ):
'''Unpack prepare_config_and_inputs into (config, inputs_dict) for the common tests.'''
__lowerCAmelCase = self.prepare_config_and_inputs()
(
(
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) ,
) = config_and_inputs
__lowerCAmelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
# Common DeiT model test suite (config checks, forward signature, training,
# gradient checkpointing, problem-type losses, pretrained loading).
# NOTE(review): machine-mangled source.  The base classes `lowerCAmelCase_`
# are undefined here -- given the imports above they are presumably
# ModelTesterMixin and PipelineTesterMixin; confirm before running.  Class
# attributes were all renamed `a`, method names `lowerCamelCase__`, locals
# `__lowerCAmelCase`, and parameters `__SCREAMING_SNAKE_CASE`, so many names
# used below (`model`, `x`, `arg_names`, `problem_types`, `inputs`, `loss`)
# are undefined at runtime until the original identifiers are restored.
class _UpperCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
a : List[str] =(
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
a : Tuple =(
{
"""feature-extraction""": DeiTModel,
"""image-classification""": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
# presumably test_pruning / test_resize_embeddings / test_head_masking
# flags from the upstream test file -- confirm
a : Any =False
a : Any =False
a : Union[str, Any] =False
def lowerCamelCase__ ( self ):
'''setUp: build the model tester and the config tester.'''
# NOTE(review): `DeiTModelTester` is undefined in this mangled copy (the
# tester class above was renamed `_UpperCAmelCase`).
__lowerCAmelCase = DeiTModelTester(self )
__lowerCAmelCase = ConfigTester(self,config_class=__SCREAMING_SNAKE_CASE,has_text_modality=__SCREAMING_SNAKE_CASE,hidden_size=37 )
def lowerCamelCase__ ( self ):
'''Run the shared DeiTConfig sanity checks.'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def lowerCamelCase__ ( self ):
'''Skipped: DeiT consumes pixel values, not input embeddings.'''
pass
def lowerCamelCase__ ( self ):
'''Input embeddings must be an nn.Module; output embeddings nn.Linear or None.'''
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings(),(nn.Module) )
__lowerCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE,nn.Linear ) )
def lowerCamelCase__ ( self ):
'''The forward signature of every model class must start with pixel_values.'''
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1],__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''Shape test for the bare DeiTModel.'''
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''Shape test for DeiTForMaskedImageModeling.'''
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''Shape test for DeiTForImageClassification.'''
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=False ):
'''Drop `labels` for the teacher model, which is inference-only.'''
__lowerCAmelCase = super()._prepare_for_class(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,return_labels=__SCREAMING_SNAKE_CASE )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def lowerCamelCase__ ( self ):
'''Training smoke test: loss.backward() must run for trainable classes.'''
if not self.model_tester.is_training:
return
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(__SCREAMING_SNAKE_CASE )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
__lowerCAmelCase = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.train()
__lowerCAmelCase = self._prepare_for_class(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,return_labels=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def lowerCamelCase__ ( self ):
'''Training with gradient checkpointing enabled must also backprop.'''
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
__lowerCAmelCase = False
__lowerCAmelCase = True
for model_class in self.all_model_classes:
if model_class in get_values(__SCREAMING_SNAKE_CASE ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
__lowerCAmelCase = model_class(__SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.to(__SCREAMING_SNAKE_CASE )
model.train()
__lowerCAmelCase = self._prepare_for_class(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,return_labels=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def lowerCamelCase__ ( self ):
'''Each classification problem_type must train without broadcast warnings.'''
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = [
{"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
{"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
{"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(__SCREAMING_SNAKE_CASE ),
*get_values(__SCREAMING_SNAKE_CASE ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f'Testing {model_class} with {problem_type["title"]}' ):
__lowerCAmelCase = problem_type["""title"""]
__lowerCAmelCase = problem_type["""num_labels"""]
__lowerCAmelCase = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.train()
__lowerCAmelCase = self._prepare_for_class(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,return_labels=__SCREAMING_SNAKE_CASE )
if problem_type["num_labels"] > 1:
__lowerCAmelCase = inputs["""labels"""].unsqueeze(1 ).repeat(1,problem_type["""num_labels"""] )
__lowerCAmelCase = inputs["""labels"""].to(problem_type["""dtype"""] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=__SCREAMING_SNAKE_CASE ) as warning_list:
__lowerCAmelCase = model(**__SCREAMING_SNAKE_CASE ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f'Something is going wrong in the regression problem: intercepted {w.message}' )
loss.backward()
@slow
def lowerCamelCase__ ( self ):
'''Loading the first pretrained DeiT checkpoint must succeed.'''
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = DeiTModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( ) -> "Image.Image":
    """Load the COCO cats fixture image used by the integration tests below.

    Fixes two mangling bugs in the original block: the opened image was
    bound to ``__lowerCAmelCase`` while the function returned the undefined
    name ``image`` (NameError), and the return annotation claimed
    ``List[str]`` even though a PIL image is returned.

    Returns:
        PIL.Image.Image: the fixture image (raises if the file is missing).
    """
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
# Slow integration tests running the real pretrained DeiT checkpoint.
# NOTE(review): mangled source -- `prepare_img` is undefined here (the
# module-level helper above was renamed `_lowerCAmelCase`), and
# `torch.floataa` looks like a mangling of `torch.float16`; confirm and
# restore before running.
class _UpperCAmelCase ( unittest.TestCase ):
@cached_property
def lowerCamelCase__ ( self ):
'''Default DeiT image processor, or None when vision extras are missing.'''
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def lowerCamelCase__ ( self ):
'''End-to-end classification on the fixture image: check logits shape and first values.'''
__lowerCAmelCase = DeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ).to(
__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.default_image_processor
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = image_processor(images=__SCREAMING_SNAKE_CASE,return_tensors="""pt""" ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
__lowerCAmelCase = model(**__SCREAMING_SNAKE_CASE )
# verify the logits
__lowerCAmelCase = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3],__SCREAMING_SNAKE_CASE,atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def lowerCamelCase__ ( self ):
'''fp16 smoke test: half-precision model with device_map="auto" must run a forward pass.'''
__lowerCAmelCase = DeiTModel.from_pretrained(
"""facebook/deit-base-distilled-patch16-224""",torch_dtype=torch.floataa,device_map="""auto""" )
__lowerCAmelCase = self.default_image_processor
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = image_processor(images=__SCREAMING_SNAKE_CASE,return_tensors="""pt""" )
__lowerCAmelCase = inputs.pixel_values.to(__SCREAMING_SNAKE_CASE )
# forward pass to make sure inference works in fp16
with torch.no_grad():
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
| 689 |
'''simple docstring'''
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
a_ : List[Any] = get_logger(__name__)
class snake_case :
"""Mock download manager used by dataset tests: every download/extract call
is resolved against a local or GitHub-hosted ``dummy_data.zip`` instead of
downloading real data.

NOTE(review): machine-mangled source.  Every method is named
``snake_case`` (later definitions shadow earlier ones) and every
parameter is named ``UpperCamelCase`` (duplicate argument names are a
SyntaxError), so names used in the bodies (`dataset_name`, `data_url`,
`path`, `paths`, `value`, ...) are undefined until the original
identifiers are restored.  The docstrings below describe the apparent
intent of each body."""
_lowerCamelCase = "dummy_data"
_lowerCamelCase = "datasets"
_lowerCamelCase = False
def __init__( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = False , UpperCamelCase = True , UpperCamelCase = None , ):
"""Record the dataset name/config/version and dummy-data options.

Intended parameters (recovered from the assignments below):
dataset_name, config, version, cache_dir=None,
use_local_dummy_data=False, load_existing_dummy_data=True,
download_callbacks=None."""
lowerCamelCase_ = 0
lowerCamelCase_ = dataset_name
lowerCamelCase_ = cache_dir
lowerCamelCase_ = use_local_dummy_data
lowerCamelCase_ = config
# download_callbacks take a single url as input
lowerCamelCase_ = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
lowerCamelCase_ = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
lowerCamelCase_ = str(UpperCamelCase )
# to be downloaded
lowerCamelCase_ = None
lowerCamelCase_ = None
@property
def snake_case ( self ):
"""Lazily download/extract the dummy data and cache the resulting path."""
if self._dummy_file is None:
lowerCamelCase_ = self.download_dummy_data()
return self._dummy_file
@property
def snake_case ( self ):
"""Folder inside the repo holding the dummy data for this config/version."""
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("dummy" , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join("dummy" , self.version_name )
@property
def snake_case ( self ):
"""Relative path of the dummy_data.zip archive."""
return os.path.join(self.dummy_data_folder , "dummy_data.zip" )
def snake_case ( self ):
"""Fetch the dummy archive (local path or GitHub URL) and extract it."""
lowerCamelCase_ = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
lowerCamelCase_ = cached_path(
UpperCamelCase , cache_dir=self.cache_dir , extract_compressed_file=UpperCamelCase , force_extract=UpperCamelCase )
return os.path.join(UpperCamelCase , self.dummy_file_name )
@property
def snake_case ( self ):
"""Path of the dummy zip inside the local datasets-scripts checkout."""
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def snake_case ( self ):
"""GitHub raw URL of the dummy zip (computed once and cached)."""
if self._bucket_url is None:
lowerCamelCase_ = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , "/" ) )
return self._bucket_url
@property
def snake_case ( self ):
"""Directory containing the dummy data (parent dir when dummy_file is a file)."""
# return full path if its a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , "/" ).split("/" )[:-1] )
def snake_case ( self , UpperCamelCase , *UpperCamelCase ):
"""Map a URL / list / dict of URLs onto paths inside the dummy data."""
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
lowerCamelCase_ = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
lowerCamelCase_ = self.dummy_file_name
# special case when data_url is a dict
if isinstance(UpperCamelCase , UpperCamelCase ):
return self.create_dummy_data_dict(UpperCamelCase , UpperCamelCase )
elif isinstance(UpperCamelCase , (list, tuple) ):
return self.create_dummy_data_list(UpperCamelCase , UpperCamelCase )
else:
return self.create_dummy_data_single(UpperCamelCase , UpperCamelCase )
def snake_case ( self , UpperCamelCase , *UpperCamelCase ):
"""Alias: delegates to download_and_extract (presumably `download`)."""
return self.download_and_extract(UpperCamelCase )
def snake_case ( self , UpperCamelCase , UpperCamelCase ):
"""Alias: delegates to download_and_extract (presumably `download_custom`)."""
return self.download_and_extract(UpperCamelCase )
def snake_case ( self , UpperCamelCase , *UpperCamelCase , **UpperCamelCase ):
"""Extraction is a no-op on dummy data: the path is returned unchanged."""
return path
def snake_case ( self ):
"""No sizes/checksums are recorded for dummy data."""
return {}
def snake_case ( self , UpperCamelCase , UpperCamelCase ):
"""Resolve a dict of URLs to paths inside the dummy folder, de-duplicating names."""
lowerCamelCase_ = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(UpperCamelCase , UpperCamelCase ):
for single_url in single_urls:
download_callback(UpperCamelCase )
else:
lowerCamelCase_ = single_urls
download_callback(UpperCamelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(UpperCamelCase , UpperCamelCase ):
lowerCamelCase_ = [os.path.join(UpperCamelCase , urllib.parse.quote_plus(Path(UpperCamelCase ).name ) ) for x in single_urls]
else:
lowerCamelCase_ = single_urls
lowerCamelCase_ = os.path.join(UpperCamelCase , urllib.parse.quote_plus(Path(UpperCamelCase ).name ) )
lowerCamelCase_ = value
# make sure that values are unique
if all(isinstance(UpperCamelCase , UpperCamelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
lowerCamelCase_ = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def snake_case ( self , UpperCamelCase , UpperCamelCase ):
"""Resolve a list of URLs, collapsing sharded TF-record / PubMed URL families."""
lowerCamelCase_ = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
lowerCamelCase_ = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" , UpperCamelCase ) ) for url in data_url )
lowerCamelCase_ = all(
url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
lowerCamelCase_ = [data_url[0]] * len(UpperCamelCase )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(UpperCamelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowerCamelCase_ = os.path.join(UpperCamelCase , urllib.parse.quote_plus(single_url.split("/" )[-1] ) )
dummy_data_list.append(UpperCamelCase )
return dummy_data_list
def snake_case ( self , UpperCamelCase , UpperCamelCase ):
"""Resolve a single URL to a path inside the dummy folder."""
for download_callback in self.download_callbacks:
download_callback(UpperCamelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowerCamelCase_ = os.path.join(UpperCamelCase , urllib.parse.quote_plus(data_url.split("/" )[-1] ) )
if os.path.exists(UpperCamelCase ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def snake_case ( self ):
"""No-op: dummy data keeps its extracted files."""
pass
def snake_case ( self ):
"""No-op: nothing to manage for dummy data."""
pass
def snake_case ( self , UpperCamelCase ):
"""Yield (member_name, file_handle) pairs for an archive path, honouring local dummy zips."""
def _iter_archive_members(UpperCamelCase ):
# this preserves the order of the members inside the ZIP archive
lowerCamelCase_ = Path(self.dummy_file ).parent
lowerCamelCase_ = path.relative_to(UpperCamelCase )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
lowerCamelCase_ = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(UpperCamelCase )
lowerCamelCase_ = Path(UpperCamelCase )
lowerCamelCase_ = _iter_archive_members(UpperCamelCase ) if self.use_local_dummy_data else path.rglob("*" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((".", "__") ):
yield file_path.relative_to(UpperCamelCase ).as_posix(), file_path.open("rb" )
def snake_case ( self , UpperCamelCase ):
"""Recursively yield file paths under the given path(s), skipping hidden/dunder names."""
if not isinstance(UpperCamelCase , UpperCamelCase ):
lowerCamelCase_ = [paths]
for path in paths:
if os.path.isfile(UpperCamelCase ):
if os.path.basename(UpperCamelCase ).startswith((".", "__") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(UpperCamelCase ):
if os.path.basename(UpperCamelCase ).startswith((".", "__") ):
continue
dirnames.sort()
for filename in sorted(UpperCamelCase ):
if filename.startswith((".", "__") ):
continue
yield os.path.join(UpperCamelCase , UpperCamelCase )
| 675 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Restore the distinct module-level names that the mangler collapsed onto a
# single `A_`: the tokenizer class below references `logger`,
# VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP and
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, all of which were left undefined
# because every assignment here targeted `A_` (each rebinding clobbered the
# previous value).
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

# Backward compatibility: `A_` previously ended up bound to the last value.
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
class UpperCAmelCase ( UpperCAmelCase__ ):
'''SentencePiece-based RemBERT tokenizer (machine-mangled copy).

NOTE(review): every parameter below is literally named
SCREAMING_SNAKE_CASE_ -- duplicate argument names are a SyntaxError.
The upstream signature is: vocab_file, do_lower_case=False,
remove_space=True, keep_accents=True, bos_token="[CLS]",
eos_token="[SEP]", unk_token="[UNK]", sep_token="[SEP]",
pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]".  The base
class ``UpperCAmelCase__`` is presumably PreTrainedTokenizer (imported
above), and ``logger`` used in save_vocabulary is presumably the module
logger (renamed ``A_`` by the mangler) -- confirm before running.'''
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="[CLS]" , SCREAMING_SNAKE_CASE_="[SEP]" , SCREAMING_SNAKE_CASE_="[UNK]" , SCREAMING_SNAKE_CASE_="[SEP]" , SCREAMING_SNAKE_CASE_="[PAD]" , SCREAMING_SNAKE_CASE_="[CLS]" , SCREAMING_SNAKE_CASE_="[MASK]" , **SCREAMING_SNAKE_CASE_ , ) -> Optional[Any]:
'''Store the preprocessing flags and load the SentencePiece model from vocab_file.'''
super().__init__(
do_lower_case=SCREAMING_SNAKE_CASE_ , remove_space=SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
lowerCamelCase_ = do_lower_case
lowerCamelCase_ = remove_space
lowerCamelCase_ = keep_accents
lowerCamelCase_ = vocab_file
lowerCamelCase_ = spm.SentencePieceProcessor()
self.sp_model.Load(SCREAMING_SNAKE_CASE_ )
@property
def UpperCamelCase( self ) -> Optional[Any]:
'''vocab_size: number of pieces in the SentencePiece model.'''
return len(self.sp_model )
def UpperCamelCase( self ) -> Tuple:
'''Return the full token -> id mapping, including added tokens.'''
lowerCamelCase_ = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> List[str]:
'''Drop the (unpicklable) SentencePiece processor before pickling.'''
lowerCamelCase_ = self.__dict__.copy()
lowerCamelCase_ = None
return state
def __setstate__( self , SCREAMING_SNAKE_CASE_ ) -> str:
'''Restore state and reload the SentencePiece model from vocab_file.'''
lowerCamelCase_ = d
lowerCamelCase_ = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ) -> Dict:
'''Tokenize text into SentencePiece pieces.
NOTE(review): the second parameter (upstream: `sample`) is accepted but
unused here -- sampled encoding is not implemented in this copy.'''
lowerCamelCase_ = self.sp_model.EncodeAsPieces(SCREAMING_SNAKE_CASE_ )
return pieces
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
'''Convert a token (piece) to its vocabulary id.'''
return self.sp_model.PieceToId(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
'''Convert a vocabulary id back to its token (piece).'''
return self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> List[str]:
'''Join a list of pieces back into a string via SentencePiece decoding.'''
lowerCamelCase_ = self.sp_model.decode_pieces(SCREAMING_SNAKE_CASE_ )
return out_string
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
'''Build model inputs with special tokens:
[CLS] A [SEP] or [CLS] A [SEP] B [SEP].'''
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False ) -> List[int]:
'''Return a mask with 1 at special-token positions, 0 elsewhere.'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
'''Token type ids: 0 for the first sequence (incl. specials), 1 for the second.'''
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
'''Copy the SentencePiece model file into save_directory.
NOTE(review): on the error branch this logs and implicitly returns None
instead of a tuple, which callers unpacking the result will not expect.'''
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error('Vocabulary path ({}) should be a directory'.format(SCREAMING_SNAKE_CASE_ ) )
return
lowerCamelCase_ = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE_ )
return (out_vocab_file,)
| 384 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Fix: all four module-level values were bound to the single name ``A_``,
# so the names the tokenizer class below relies on were never defined.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}

# Maximum input sizes (in tokens) keyed by checkpoint name.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}
class RembertTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for RemBERT.

    Fixes applied to the scrambled original: the class/attribute names were
    garbled, ``__init__`` declared many identically-named parameters (a
    SyntaxError), every assignment was bound to a throwaway local instead of
    an attribute, and all methods shared one name so each shadowed the last.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        """Size of the SentencePiece vocabulary."""
        return len(self.sp_model)

    def get_vocab(self):
        """Return the full token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload
        # from ``vocab_file`` in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        """Tokenize *text* into SentencePiece pieces (``sample`` is unused)."""
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        """Convert a piece (string) to its vocabulary id."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Convert a vocabulary id to its piece (string)."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Join pieces back into a plain string."""
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """``[CLS] A [SEP]`` for one sequence, ``[CLS] A [SEP] B [SEP]`` for a pair."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """1 at special-token positions, 0 at sequence-token positions."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    'You should not supply a second sequence if the provided sequence of '
                    'ids is already formatted with special tokens for the model.' )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Segment ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy the SentencePiece model file into *save_directory*."""
        if not os.path.isdir(save_directory):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 384 | 1 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
# Fix: all four values were bound to the single name ``UpperCamelCase``;
# restore the distinct names main() needs.
# Directory containing the YOLO-format ``.txt`` label files.
LABEL_DIR = ""
# Directory containing the matching ``.jpg`` images.
IMAGE_DIR = ""
# Directory the flipped images/annotations are written to.
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)
def main() -> None:
    """Flip every dataset image plus its YOLO annotations and save the results.

    Fixes: the original referenced undefined names throughout and wrote the
    output files to the filesystem root (``/{file_root}.jpg``) instead of
    under ``OUTPUT_DIR``.
    """
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print('Processing...')
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Random 32-char code keeps output names collision-free, e.g.
        # '7b7ad245cdff75241935e4dd860f3bad'.
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit('.', 1)[0]
        file_root = f"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
        cva.imwrite(f"""{file_root}.jpg""", image, [cva.IMWRITE_JPEG_QUALITY, 85])
        print(f"""Success {index+1}/{len(new_images)} with {file_name}""")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
            annos_list.append(obj)
        with open(f"""{file_root}.txt""", 'w') as outfile:
            outfile.write('\n'.join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """Collect image paths and their YOLO annotation boxes.

    Args:
        label_dir: Directory with ``.txt`` label files (one line per box:
            ``class x_center y_center width height``).
        img_dir: Directory with the matching ``.jpg`` images.

    Returns:
        ``(img_paths, labels)`` — parallel lists; entries with no boxes are
        skipped.

    Fix: the original bound every intermediate to a throwaway name, leaving
    the values it later read undefined.
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, '*.txt')):
        label_name = label_file.split(os.sep)[-1].rsplit('.', 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"""{label_name}.jpg""")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip('\n').split(' ')
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(img_list, anno_list, flip_type: int = 1) -> tuple[list, list, list]:
    """Flip each image and mirror its normalized bounding boxes.

    Args:
        img_list: Paths of input images.
        anno_list: Per-image lists of ``[class, x_c, y_c, w, h]`` boxes.
        flip_type: Axis for ``cva.flip`` — 1 horizontal (mirrors x-centers),
            0 vertical (mirrors y-centers).

    Returns:
        ``(new_imgs_list, new_annos_lists, path_list)``.

    Fixes: undefined intermediates, and ``cva.flip`` was called with the
    image passed for both arguments instead of the flip axis.
    NOTE(review): a flip_type other than 0/1 still raises NameError on
    ``new_img`` — kept from the original behavior.
    """
    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cva.imread(path)
        if flip_type == 1:
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
def random_chars(number_char: int = 32) -> str:
    """Return a random string of lowercase letters and digits.

    Fix: the parameter name was garbled, leaving ``number_char`` undefined
    inside the body.
    """
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
# Script entry point: run the flip-augmentation pipeline, then report completion.
if __name__ == "__main__":
    main()
    print("DONE ✅")
| 66 |
"""simple docstring"""
from __future__ import annotations
import time
# Fix: three module-level values were all bound to ``lowerCamelCase__``;
# restore the names the search classes below reference.
# A path is a list of (y, x) grid coordinates.
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    """A search-tree node: a grid position, the goal position, and a parent link.

    Fix: the original assigned every constructor argument to a throwaway
    local instead of an instance attribute, so nodes carried no state.
    """

    def __init__(self, pos_x, pos_y, goal_x, goal_y, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        # Positions are stored (y, x) to match the grid's row-major indexing.
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    """Unidirectional BFS over the module-level ``grid`` from start to goal.

    Fixes: the three classes in this file all shared one garbled name, and
    every local/attribute was bound to a throwaway placeholder.
    """

    def __init__(self, start, goal):
        # start/goal are given as (y, x); Node takes (pos_x, pos_y, ...).
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self):
        """Run BFS; return the path to the goal, [start] if unreachable, per original."""
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent):
        """Return walkable neighbor nodes of *parent* (bounds- and obstacle-checked)."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent))
        return successors

    def retrace_path(self, node):
        """Walk parent links back from *node* and return the path start -> node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    """BFS simultaneously from start and goal; meet in the middle.

    Fixes: garbled shared class name and placeholder-bound locals/attributes.
    """

    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self):
        """Alternate one expansion per direction until the frontiers meet."""
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node)
            # Each search chases the other's current frontier node.
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node, bwd_node):
        """Join the forward path and the reversed backward path at the meeting node."""
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()  # drop the duplicated meeting node
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    # Fix: every value was bound to one placeholder name, so later lines
    # (e.g. BreadthFirstSearch(init, goal)) referenced undefined names.
    import doctest

    doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time
    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
    print("Bidirectional BFS computation time : ", bd_bfs_time)
| 574 | 0 |
class Node:
    """A named value used as a min-heap element; ordered by ``val``.

    Fix: the original bound constructor arguments to throwaway locals, so
    ``__str__``/``__lt__`` read attributes that were never set.
    """

    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return F'''{self.__class__.__name__}({self.name}, {self.val})'''

    def __lt__(self, other):
        return self.val < other.val
class MinHeap:
    """Array-backed binary min-heap keyed on each element's ``val``.

    Maintains two auxiliary maps: ``idx_of_element`` (element -> heap index,
    required by decrease_key) and ``heap_dict`` (element name -> value, used
    by ``__getitem__``).

    Fix: the scrambled original stored nothing on ``self`` and bound every
    intermediate to a throwaway local.
    """

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        """Heapify *array* in place (bottom-up) and register the index maps."""
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        """Push array[idx] down until both children are >= it."""
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)
            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        """Pull heap[idx] up while it is smaller than its parent."""
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        """Return (without removing) the minimum element."""
        return self.heap[0]

    def remove(self):
        """Remove and return the minimum element."""
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        """Add *node* to the heap and restore the heap property."""
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        """Lower *node*'s value to *new_value* (must be strictly smaller)."""
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
# Fix: all demo values were bound to one placeholder name, so the later
# statements (MinHeap([...]), decrease_key(b, ...)) referenced undefined names.
r = Node("""R""", -1)
b = Node("""B""", 6)
a = Node("""A""", 3)
x = Node("""X""", 1)
e = Node("""E""", 4)
# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)

# Before
print("""Min Heap - before decrease key""")
for i in my_min_heap.heap:
    print(i)

print("""Min Heap - After decrease key of node [B -> -17]""")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 372 |
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main() -> None:
    """Entry point for the ``diffusers-cli`` command-line tool.

    Fixes: every result was bound to a placeholder local (so ``parser``,
    ``args`` and ``service`` were undefined when used), and the function
    name did not match the ``main()`` call in the script guard.
    """
    parser = ArgumentParser('Diffusers CLI tool', usage='diffusers-cli <command> [<args>]')
    commands_parser = parser.add_subparsers(help='diffusers-cli command helpers')

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, 'func'):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
# Invoke the CLI when executed as a script.
if __name__ == "__main__":
    main()
| 372 | 1 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Fix: five module-level values were all bound to ``UpperCAmelCase_``;
# restore the distinct names the tokenizer class below references.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model"""}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
        """albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
        """albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
        """albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
        """albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
        """albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
        """albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
        """albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
    }
}

# Maximum input sizes (in tokens) keyed by checkpoint name.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """albert-base-v1""": 5_1_2,
    """albert-large-v1""": 5_1_2,
    """albert-xlarge-v1""": 5_1_2,
    """albert-xxlarge-v1""": 5_1_2,
    """albert-base-v2""": 5_1_2,
    """albert-large-v2""": 5_1_2,
    """albert-xlarge-v2""": 5_1_2,
    """albert-xxlarge-v2""": 5_1_2,
}

# SentencePiece's word-boundary marker character.
SPIECE_UNDERLINE = """▁"""
class AlbertTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for ALBERT.

    Fixes applied to the scrambled original: ``__init__`` declared many
    identically-named parameters (a SyntaxError) and every intermediate was
    bound to the placeholder ``_A`` instead of the attribute/local the rest
    of the body reads.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        """Size of the SentencePiece vocabulary."""
        return len(self.sp_model)

    def get_vocab(self):
        """Return the full token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload
        # from ``vocab_file`` in __setstate__.
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, '''sp_model_kwargs'''):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        """Normalize raw text (whitespace, quotes, accents, case) before tokenizing."""
        if self.remove_space:
            outputs = ''' '''.join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace('''``''', '''"''').replace('''\'\'''', '''"''')

        if not self.keep_accents:
            outputs = unicodedata.normalize('''NFKD''', outputs)
            outputs = ''''''.join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize *text*, splitting pieces such as ``9,`` into digits plus comma."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(''',''') and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ''''''))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Convert a piece (string) to its vocabulary id."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Convert a vocabulary id to its piece (string)."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Decode pieces to a string, keeping special tokens verbatim."""
        current_sub_tokens = []
        out_string = ''''''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """``[CLS] A [SEP]`` for one sequence, ``[CLS] A [SEP] B [SEP]`` for a pair."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        """1 at special-token positions, 0 at sequence-token positions."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Segment ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or serialize) the SentencePiece model into *save_directory*."""
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, '''wb''') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 2 |
'''simple docstring'''
def fibonacci(n: int) -> int:
    """Return the n-th Fibonacci number of this solution's sequence.

    Convention kept from the original: non-int input and ``n == 1`` give 0,
    ``n == 2`` gives 1, and larger ``n`` indexes the sequence [0, 1, 1, 2, ...].

    Fixes: the parameter name was garbled so ``n`` was undefined, and the
    type guard compared the argument against itself (``isinstance(A, A)``).
    """
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])

        return sequence[n]
def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with *n* digits.

    Fixes: function name collided with its siblings (all ``_snake_case``),
    and the counters were bound to a placeholder so ``digits``/``index``
    were undefined.
    """
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index
def solution(n: int = 1000) -> int:
    """Project Euler 25: index of the first Fibonacci term with *n* digits."""
    return fibonacci_digits_index(n)
# Read n from stdin and print the Project Euler 25 answer.
if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Fix: both module-level values were bound to the single name ``lowercase_``,
# losing the archive-map constant's real name.
logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/config.json""",
    """distilbert-base-uncased-distilled-squad""": (
        """https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"""
    ),
    """distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/config.json""",
    """distilbert-base-cased-distilled-squad""": (
        """https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"""
    ),
    """distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json""",
    """distilbert-base-multilingual-cased""": (
        """https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"""
    ),
    """distilbert-base-uncased-finetuned-sst-2-english""": (
        """https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"""
    ),
}
class DistilBertConfig(PretrainedConfig):
    """Configuration class storing the hyper-parameters of a DistilBERT model.

    Fixes: the original ``__init__`` declared every parameter with the same
    name ``a`` (a SyntaxError) and assigned undefined names to placeholder
    locals instead of attributes.
    """

    model_type = 'distilbert'
    # Map the common config attribute names onto DistilBERT's field names.
    attribute_map = {
        'hidden_size': 'dim',
        'num_attention_heads': 'n_heads',
        'num_hidden_layers': 'n_layers',
    }

    def __init__(
        self,
        vocab_size=30_522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)
class DistilBertOnnxConfig(OnnxConfig):
    """ONNX export configuration for DistilBERT.

    Fix: the axis mapping was bound to a placeholder local in both branches,
    so ``dynamic_axis`` was undefined when building the OrderedDict.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis specification for the exported model's inputs."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ])
| 716 |
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
    """Builds small ViT configs/inputs and runs shape checks for the common test suite.

    Fixes: methods declared identically-named parameters (a SyntaxError) and
    every value was bound to the placeholder ``lowercase__`` instead of the
    attribute/local the rest of the body reads.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        """Return a small config plus random pixel values (and labels if enabled)."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        """Build a ViTConfig from this tester's hyper-parameters."""
        return ViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )

    def create_and_check_model(self, config, pixel_values, labels):
        """Check the base model's output hidden-state shape."""
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        """Check reconstruction shapes, including the greyscale (1-channel) case."""
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Check classification logits shape, including the greyscale case."""
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        """Adapt prepare_config_and_inputs() to the common-test (config, inputs_dict) form."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE (UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase : Any = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
_UpperCamelCase : Union[str, Any] = (
{'feature-extraction': ViTModel, 'image-classification': ViTForImageClassification}
if is_torch_available()
else {}
)
_UpperCamelCase : int = True
_UpperCamelCase : int = False
_UpperCamelCase : Union[str, Any] = False
_UpperCamelCase : Dict = False
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> List[str]:
"""simple docstring"""
lowercase__ = ViTModelTester(self )
lowercase__ = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[Any]:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Union[str, Any]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Optional[int]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(a )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Tuple:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*a )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> int:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> List[Any]:
"""simple docstring"""
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = ViTModel.from_pretrained(a )
self.assertIsNotNone(a )
def __UpperCamelCase () -> str:
lowercase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> List[Any]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224' ).to(a )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowercase__ = model(**a )
# verify the logits
lowercase__ = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , a )
lowercase__ = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1E-4 ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : int )-> List[str]:
"""simple docstring"""
lowercase__ = ViTModel.from_pretrained('facebook/dino-vits8' ).to(a )
lowercase__ = ViTImageProcessor.from_pretrained('facebook/dino-vits8' , size=480 )
lowercase__ = prepare_img()
lowercase__ = image_processor(images=a , return_tensors='pt' )
lowercase__ = inputs.pixel_values.to(a )
# forward pass
with torch.no_grad():
lowercase__ = model(a , interpolate_pos_encoding=a )
# verify the logits
lowercase__ = torch.Size((1, 3_601, 384) )
self.assertEqual(outputs.last_hidden_state.shape , a )
lowercase__ = torch.tensor(
[[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(a )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , a , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self : str )-> str:
"""simple docstring"""
lowercase__ = ViTModel.from_pretrained('facebook/dino-vits8' , torch_dtype=torch.floataa , device_map='auto' )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=a , return_tensors='pt' )
lowercase__ = inputs.pixel_values.to(a )
# forward pass to make sure inference works in fp16
with torch.no_grad():
lowercase__ = model(a )
| 45 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
_a : Any = logging.get_logger(__name__)
# NOTE(review): this second assignment rebinds the same machine-generated name
# `_a`, clobbering the logger above. The mapping itself looks like a
# pretrained-config archive map (model id -> config URL) — confirm the intended
# variable names against the upstream module.
_a : Optional[Any] = {
    'facebook/s2t-wav2vec2-large-en-de': (
        'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class a_ ( PretrainedConfig ):
    """
    Configuration class for a Speech2Text2 (decoder-only) model.

    Stores the decoder architecture hyper-parameters and forwards the
    special-token ids to :class:`PretrainedConfig`.

    NOTE(review): the previous version inherited from the undefined name ``a``,
    declared every ``__init__`` parameter with the same duplicate name (a
    ``SyntaxError``) and bound every value to a throwaway local instead of an
    instance attribute. Parameter names below were reconstructed from the
    values the body referenced; confirm against the upstream configuration.
    """

    model_type = 'speech_to_text_2'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__(
        self,
        vocab_size=10_000,
        decoder_layers=6,
        decoder_ffn_dim=2_048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1_024,
        **kwargs,
    ):
        """Store the decoder hyper-parameters and delegate token ids to the base class."""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        # Decoder-only model: the generic layer count mirrors the decoder depth.
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 598 |
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class a_ ( unittest.TestCase ):
def __init__( self : Any , UpperCAmelCase__ : Optional[Any] ):
"""simple docstring"""
snake_case : Union[str, Any] = parent
def lowerCAmelCase( self : Optional[int] ):
"""simple docstring"""
return {}
def a_ ( ) -> list:
    """Return two sample HTML documents used as feature-extraction fixtures.

    The previous version bound both documents to the same throwaway local and
    returned the undefined name ``html_string_a`` twice; it now returns the
    two distinct strings.
    """
    html_string_1 = '''<HTML>
 <HEAD>
 <TITLE>sample document</TITLE>
 </HEAD>
 <BODY BGCOLOR="FFFFFF">
 <HR>
 <a href="http://google.com">Goog</a>
 <H1>This is one header</H1>
 <H2>This is a another Header</H2>
 <P>Travel from
 <P>
 <B>SFO to JFK</B>
 <BR>
 <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
 <HR>
 <div style="color:#0000FF">
 <h3>Traveler <b> name </b> is
 <p> John Doe </p>
 </div>'''
    html_string_2 = '''
 <!DOCTYPE html>
 <html>
 <body>
 <h1>My First Heading</h1>
 <p>My first paragraph.</p>
 </body>
 </html>
 '''
    return [html_string_1, html_string_2]
@require_bsa
class a_ ( a , unittest.TestCase ):
    # NOTE(review): the first base class is the undefined name `a`; only
    # FeatureExtractionSavingTestMixin is imported above, so the base-class
    # name appears garbled. All three methods below also share the name
    # `lowerCAmelCase`, so only the last definition survives — confirm against
    # the upstream test module.

    # Feature-extraction class under test (None when bs4 is unavailable).
    A__ : List[Any] = MarkupLMFeatureExtractor if is_bsa_available() else None

    def lowerCAmelCase( self : Dict ):
        """Set-up hook: build the helper tester object.

        NOTE(review): `MarkupLMFeatureExtractionTester` is not defined under
        that name in this file, and the result is bound to a throwaway local
        instead of `self.feature_extract_tester` (read by the property below).
        """
        snake_case : int = MarkupLMFeatureExtractionTester(self )

    @property
    def lowerCAmelCase( self : Tuple ):
        """Kwargs used to instantiate the feature extractor under test."""
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def lowerCAmelCase( self : Optional[Any] ):
        """Check node/xpath extraction for a single HTML string and a batch.

        NOTE(review): locals below are bound to the throwaway name
        `snake_case` while later lines read `feature_extractor`, `encoding`,
        `expected_nodes` and `expected_xpaths` — the local names appear
        garbled; confirm against the upstream test.
        """
        # Initialize feature_extractor
        snake_case : List[Any] = self.feature_extraction_class()
        # Test not batched input
        snake_case : List[str] = get_html_strings()[0]
        snake_case : Any = feature_extractor(UpperCAmelCase__ )
        # fmt: off
        snake_case : List[Any] = [['''sample document''', '''Goog''', '''This is one header''', '''This is a another Header''', '''Travel from''', '''SFO to JFK''', '''on May 2, 2015 at 2:00 pm. For details go to confirm.com''', '''Traveler''', '''name''', '''is''', '''John Doe''']]
        snake_case : List[str] = [['''/html/head/title''', '''/html/body/a''', '''/html/body/h1''', '''/html/body/h2''', '''/html/body/p''', '''/html/body/p/p/b[1]''', '''/html/body/p/p/b[2]/i''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/b''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/p''']]
        # fmt: on
        self.assertEqual(encoding.nodes , UpperCAmelCase__ )
        self.assertEqual(encoding.xpaths , UpperCAmelCase__ )
        # Test batched
        snake_case : List[str] = get_html_strings()
        snake_case : Optional[Any] = feature_extractor(UpperCAmelCase__ )
        # fmt: off
        snake_case : List[Any] = expected_nodes + [['''My First Heading''', '''My first paragraph.''']]
        snake_case : Union[str, Any] = expected_xpaths + [['''/html/body/h1''', '''/html/body/p''']]
        self.assertEqual(len(encoding.nodes ) , 2 )
        self.assertEqual(len(encoding.xpaths ) , 2 )
        self.assertEqual(encoding.nodes , UpperCAmelCase__ )
        self.assertEqual(encoding.xpaths , UpperCAmelCase__ )
| 598 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
# Conversion scripts log at INFO level.
logging.set_verbosity_info()
# NOTE(review): both assignments below rebind the same machine-generated name,
# so the logger is immediately clobbered by the CPU device object.
_UpperCamelCase : Optional[int] = logging.get_logger(__name__)
_UpperCamelCase : str = torch.device('cpu')
def __UpperCAmelCase ( ):
    """Download and return the standard COCO fixture image used for conversion checks.

    The previous body referenced the undefined name ``A`` both for the URL and
    for the ``stream`` flag; they are now a real local and ``True``.
    """
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    return Image.open(requests.get(url, stream=True ).raw )
def __UpperCAmelCase ( A : Optional[Any] ) -> Optional[Any]:
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_703e00, 2.1_107e00, -2.0_811e00, 8.8_685e-01, 2.4_360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_636e-01, 2.3_478e-01, -1.6_963e00, -1.7_381e00, -8.6_337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_768e-01, -4.7_429e-01, -1.0_897e00, -1.0_248e00, 3.5_523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_330e-01, 2.4_211e-01, -6.0_185e-01, -8.2_789e-01, -6.0_446e-02] )
def __UpperCAmelCase ( A : Dict , A : Dict , A : List[Any] ) -> Optional[int]:
UpperCAmelCase_ : Union[str, Any] = dct.pop(A )
UpperCAmelCase_ : Any = val
def __UpperCAmelCase ( A : Dict ) -> str:
UpperCAmelCase_ : Optional[int] = []
for k in state_dict.keys():
UpperCAmelCase_ : Dict = k
if ".pwconv" in k:
UpperCAmelCase_ : int = k_new.replace('''.pwconv''' , '''.point_wise_conv''' )
if ".dwconv" in k:
UpperCAmelCase_ : Union[str, Any] = k_new.replace('''.dwconv''' , '''.depth_wise_conv''' )
if ".Proj." in k:
UpperCAmelCase_ : List[str] = k_new.replace('''.Proj.''' , '''.proj.''' )
if "patch_embed" in k_new:
UpperCAmelCase_ : Dict = k_new.replace('''patch_embed''' , '''swiftformer.patch_embed.patch_embedding''' )
if "network" in k_new:
UpperCAmelCase_ : List[Any] = k_new.split('''.''' )
if ls[2].isdigit():
UpperCAmelCase_ : int = '''swiftformer.encoder.network.''' + ls[1] + '''.blocks.''' + ls[2] + '''.''' + '''.'''.join(ls[3:] )
else:
UpperCAmelCase_ : List[Any] = k_new.replace('''network''' , '''swiftformer.encoder.network''' )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def __UpperCAmelCase ( A : Union[str, Any] , A : List[str] , A : Dict ) -> int:
    """Convert an original SwiftFormer checkpoint to `SwiftFormerForImageClassification`.

    NOTE(review): the signature declares three parameters all named ``A`` (a
    ``SyntaxError``) while the body reads ``swiftformer_name``,
    ``pytorch_dump_folder_path`` and ``original_ckpt``; most locals are bound
    to the throwaway name ``UpperCAmelCase_`` while later lines read
    ``idalabel``, ``checkpoint``, ``rename_keys``, ``hf_model``, ``inputs``
    and ``hf_logits``, and ``create_rename_keys``/``rename_key``/
    ``get_expected_output`` are not defined under those names in this file.
    Reconstruct from the upstream conversion script before use.
    """
    UpperCAmelCase_ : str = SwiftFormerConfig()
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    UpperCAmelCase_ : Dict = 1_0_0_0
    UpperCAmelCase_ : Any = '''huggingface/label-files'''
    UpperCAmelCase_ : Any = '''imagenet-1k-id2label.json'''
    UpperCAmelCase_ : int = json.load(open(hf_hub_download(A , A , repo_type='''dataset''' ) , '''r''' ) )
    UpperCAmelCase_ : Any = {int(A ): v for k, v in idalabel.items()}
    UpperCAmelCase_ : Optional[Any] = idalabel
    UpperCAmelCase_ : str = {v: k for k, v in idalabel.items()}
    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        UpperCAmelCase_ : List[str] = [3, 3, 6, 4]
        UpperCAmelCase_ : int = [4_8, 5_6, 1_1_2, 2_2_0]
    elif swiftformer_name == "swiftformer_s":
        UpperCAmelCase_ : Any = [3, 3, 9, 6]
        UpperCAmelCase_ : Any = [4_8, 6_4, 1_6_8, 2_2_4]
    elif swiftformer_name == "swiftformer_l1":
        UpperCAmelCase_ : Tuple = [4, 3, 1_0, 5]
        UpperCAmelCase_ : Optional[Any] = [4_8, 9_6, 1_9_2, 3_8_4]
    elif swiftformer_name == "swiftformer_l3":
        UpperCAmelCase_ : str = [4, 4, 1_2, 6]
        UpperCAmelCase_ : Any = [6_4, 1_2_8, 3_2_0, 5_1_2]
    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith('''https''' ):
            UpperCAmelCase_ : Tuple = torch.hub.load_state_dict_from_url(A , map_location='''cpu''' , check_hash=A )
        else:
            UpperCAmelCase_ : str = torch.load(A , map_location='''cpu''' )
    UpperCAmelCase_ : List[Any] = checkpoint
    UpperCAmelCase_ : List[str] = create_rename_keys(A )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(A , A , A )
    # load HuggingFace model
    UpperCAmelCase_ : Tuple = SwiftFormerForImageClassification(A ).eval()
    hf_model.load_state_dict(A )
    # prepare test inputs
    UpperCAmelCase_ : Optional[int] = prepare_img()
    UpperCAmelCase_ : Optional[Any] = ViTImageProcessor.from_pretrained('''preprocessor_config''' )
    UpperCAmelCase_ : int = processor(images=A , return_tensors='''pt''' )
    # compare outputs from both models
    UpperCAmelCase_ : str = get_expected_output(A )
    UpperCAmelCase_ : Optional[Any] = hf_model(inputs['''pixel_values'''] ).logits
    assert hf_logits.shape == torch.Size([1, 1_0_0_0] )
    assert torch.allclose(hf_logits[0, 0:5] , A , atol=1e-3 )
    Path(A ).mkdir(exist_ok=A )
    print(F"Saving model {swiftformer_name} to {pytorch_dump_folder_path}" )
    hf_model.save_pretrained(A )
if __name__ == "__main__":
    # NOTE(review): the parser and the parsed args are both bound to the
    # throwaway name `_UpperCamelCase` while later lines read `parser` and
    # `args`, and the final call targets `convert_swiftformer_checkpoint`,
    # which is not defined in this file (the conversion function above is
    # named `__UpperCAmelCase`). Confirm against the upstream script.
    _UpperCamelCase : Dict = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--swiftformer_name',
        default='swiftformer_xs',
        choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
        type=str,
        help='Name of the SwiftFormer model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='./converted_outputs/',
        type=str,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
    _UpperCamelCase : Any = parser.parse_args()
    convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 216 |
'''simple docstring'''
from collections import deque
def __UpperCAmelCase ( A : int ) -> Optional[Any]:
UpperCAmelCase_ : Tuple = len(A )
UpperCAmelCase_ : Dict = deque()
UpperCAmelCase_ : Optional[Any] = [False for _ in range(A )]
UpperCAmelCase_ : str = [-1 for _ in range(A )]
UpperCAmelCase_ : Union[str, Any] = index_of[:]
def strong_connect(A : Union[str, Any] , A : Optional[Any] , A : List[Any] ):
UpperCAmelCase_ : Union[str, Any] = index # the number when this node is seen
UpperCAmelCase_ : List[Any] = index # lowest rank node reachable from here
index += 1
stack.append(A )
UpperCAmelCase_ : Optional[int] = True
for w in g[v]:
if index_of[w] == -1:
UpperCAmelCase_ : str = strong_connect(A , A , A )
UpperCAmelCase_ : List[Any] = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
UpperCAmelCase_ : Any = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
UpperCAmelCase_ : str = []
UpperCAmelCase_ : Tuple = stack.pop()
UpperCAmelCase_ : Dict = False
component.append(A )
while w != v:
UpperCAmelCase_ : Optional[int] = stack.pop()
UpperCAmelCase_ : str = False
component.append(A )
components.append(A )
return index
UpperCAmelCase_ : str = []
for v in range(A ):
if index_of[v] == -1:
strong_connect(A , 0 , A )
return components
def __UpperCAmelCase ( A : List[Any] , A : str ) -> List[str]:
UpperCAmelCase_ : List[Any] = [[] for _ in range(A )]
for u, v in edges:
g[u].append(A )
return g
if __name__ == "__main__":
    # Test
    # NOTE(review): every assignment below rebinds the throwaway name
    # `_UpperCamelCase`, so `source`, `target`, `n_vertices`, `edges` and `g`
    # are undefined when read; `create_graph` and `tarjan` are also not
    # defined under those names in this file (both are `__UpperCAmelCase`).
    _UpperCamelCase : int = 7
    _UpperCamelCase : List[str] = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    _UpperCamelCase : Optional[Any] = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    _UpperCamelCase : Union[str, Any] = [(u, v) for u, v in zip(source, target)]
    _UpperCamelCase : Optional[Any] = create_graph(n_vertices, edges)
    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 216 | 1 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def __UpperCamelCase ( tf_checkpoint_path , bert_config_file , pytorch_dump_path ):
    """Convert a TensorFlow BERT checkpoint to a PyTorch ``BertForPreTraining`` file.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        bert_config_file: JSON file describing the pre-trained model architecture.
        pytorch_dump_path: where to write the converted PyTorch state dict.

    The previous signature declared all three parameters as ``_A`` (a
    ``SyntaxError``) and bound the config/model to throwaway locals.
    """
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file )
    print(f"Building PyTorch model from configuration: {config}" )
    model = BertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    # NOTE(review): the parser and parsed args are both bound to the
    # throwaway name `_A` while later lines read `parser` and `args`, and the
    # final call targets `convert_tf_checkpoint_to_pytorch`, which is not
    # defined in this file (the function above is named `__UpperCamelCase`).
    _A = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--bert_config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained BERT model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    _A = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 431 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
# Lightweight container for the train/validation/test splits.
_A = collections.namedtuple('''_Datasets''', ['''train''', '''validation''', '''test'''])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
# NOTE(review): this rebinds `_A`, clobbering the namedtuple above; the body
# of the loader below reads `_Datasets` and `DEFAULT_SOURCE_URL` instead,
# neither of which is defined under those names here.
_A = '''https://storage.googleapis.com/cvdf-datasets/mnist/'''
def __UpperCamelCase ( _A ):
lowerCAmelCase_ = numpy.dtype(numpy.uintaa ).newbyteorder('''>''' )
return numpy.frombuffer(bytestream.read(4 ) , dtype=_A )[0]
@deprecated(_A , '''Please use tf.data to implement this functionality.''' )
def __UpperCamelCase ( _A ):
    """Extract MNIST images from a gzipped IDX file into a uint8 4-D array.

    NOTE(review): the parameter is named ``_A`` while the body reads ``f``,
    ``magic``, ``num_images``, ``rows``, ``cols`` and ``data``; the reader
    helper is called as ``_readaa`` (undefined) and the dtype as
    ``numpy.uinta`` (nonexistent). The local names appear machine-garbled —
    confirm against the original TensorFlow MNIST loader.
    """
    print('''Extracting''' , f.name )
    with gzip.GzipFile(fileobj=_A ) as bytestream:
        lowerCAmelCase_ = _readaa(_A )
        # 2051 is the IDX magic number for image files.
        if magic != 2051:
            raise ValueError(
                '''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) )
        lowerCAmelCase_ = _readaa(_A )
        lowerCAmelCase_ = _readaa(_A )
        lowerCAmelCase_ = _readaa(_A )
        lowerCAmelCase_ = bytestream.read(rows * cols * num_images )
        lowerCAmelCase_ = numpy.frombuffer(_A , dtype=numpy.uinta )
        lowerCAmelCase_ = data.reshape(_A , _A , _A , 1 )
        return data
@deprecated(_A , '''Please use tf.one_hot on tensors.''' )
def __UpperCamelCase ( labels_dense , num_classes ):
    """Convert class-index labels of shape ``(n,)`` to one-hot rows ``(n, num_classes)``.

    The previous signature declared both parameters as ``_A`` (a
    ``SyntaxError``) and bound every intermediate to a throwaway local; the
    names the body referenced are now real parameters and locals.
    """
    num_labels = labels_dense.shape[0]
    # Flat offsets of each row's start in the one-hot matrix.
    index_offset = numpy.arange(num_labels ) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes) )
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
@deprecated(_A , '''Please use tf.data to implement this functionality.''' )
def __UpperCamelCase ( _A , _A=False , _A=10 ):
    """Extract MNIST labels from a gzipped IDX file into a 1-D array.

    NOTE(review): all three parameters share the name ``_A`` (a
    ``SyntaxError``) while the body reads ``f``, ``one_hot``, ``magic`` and
    ``labels``; ``_readaa`` and ``_dense_to_one_hot`` are not defined under
    those names in this file. Confirm against the original MNIST loader.
    """
    print('''Extracting''' , f.name )
    with gzip.GzipFile(fileobj=_A ) as bytestream:
        lowerCAmelCase_ = _readaa(_A )
        # 2049 is the IDX magic number for label files.
        if magic != 2049:
            raise ValueError(
                '''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) )
        lowerCAmelCase_ = _readaa(_A )
        lowerCAmelCase_ = bytestream.read(_A )
        lowerCAmelCase_ = numpy.frombuffer(_A , dtype=numpy.uinta )
        if one_hot:
            return _dense_to_one_hot(_A , _A )
        return labels
class A :
    """In-memory MNIST-style dataset with epoch-aware mini-batching.

    NOTE(review): ``__init__`` and the batching method declare several
    parameters with the same duplicate name (a ``SyntaxError``), the
    decorator argument ``UpperCamelCase__`` is undefined at class scope, the
    four properties all share one name (only the last survives), and most
    assignments bind the throwaway name ``lowerCAmelCase_`` while later lines
    read ``seeda``, ``dtype``, ``images``, ``start``, ``end`` etc. The block
    appears machine-garbled — reconstruct from the original TensorFlow MNIST
    ``_DataSet`` before use.
    """

    @deprecated(
        UpperCamelCase__, '''Please use alternatives such as official/mnist/_DataSet.py'''
        ''' from tensorflow/models.''', )
    def __init__( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__=False, UpperCamelCase__=False, UpperCamelCase__=dtypes.floataa, UpperCamelCase__=True, UpperCamelCase__=None, ):
        """Construct the dataset from image/label arrays (or fake data)."""
        lowerCAmelCase_ , lowerCAmelCase_ = random_seed.get_seed(UpperCamelCase__ )
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seeda if seed is None else seeda )
        lowerCAmelCase_ = dtypes.as_dtype(UpperCamelCase__ ).base_dtype
        if dtype not in (dtypes.uinta, dtypes.floataa):
            raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype )
        if fake_data:
            lowerCAmelCase_ = 1_0000
            lowerCAmelCase_ = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            lowerCAmelCase_ = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                lowerCAmelCase_ = images.reshape(
                    images.shape[0], images.shape[1] * images.shape[2] )
            if dtype == dtypes.floataa:
                # Convert from [0, 255] -> [0.0, 1.0].
                lowerCAmelCase_ = images.astype(numpy.floataa )
                lowerCAmelCase_ = numpy.multiply(UpperCamelCase__, 1.0 / 255.0 )
        lowerCAmelCase_ = images
        lowerCAmelCase_ = labels
        lowerCAmelCase_ = 0
        lowerCAmelCase_ = 0

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        """Image array accessor (reads ``self._images``)."""
        return self._images

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        """Label array accessor (reads ``self._labels``)."""
        return self._labels

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        """Example-count accessor (reads ``self._num_examples``)."""
        return self._num_examples

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        """Completed-epoch counter accessor (reads ``self._epochs_completed``)."""
        return self._epochs_completed

    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__=False, UpperCamelCase__=True ):
        """Return the next mini-batch, reshuffling at epoch boundaries."""
        if fake_data:
            lowerCAmelCase_ = [1] * 784
            lowerCAmelCase_ = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(UpperCamelCase__ )],
                [fake_label for _ in range(UpperCamelCase__ )],
            )
        lowerCAmelCase_ = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            lowerCAmelCase_ = numpy.arange(self._num_examples )
            numpy.random.shuffle(UpperCamelCase__ )
            lowerCAmelCase_ = self.images[perma]
            lowerCAmelCase_ = self.labels[perma]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            lowerCAmelCase_ = self._num_examples - start
            lowerCAmelCase_ = self._images[start : self._num_examples]
            lowerCAmelCase_ = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                lowerCAmelCase_ = numpy.arange(self._num_examples )
                numpy.random.shuffle(UpperCamelCase__ )
                lowerCAmelCase_ = self.images[perm]
                lowerCAmelCase_ = self.labels[perm]
            # Start next epoch
            lowerCAmelCase_ = 0
            lowerCAmelCase_ = batch_size - rest_num_examples
            lowerCAmelCase_ = self._index_in_epoch
            lowerCAmelCase_ = self._images[start:end]
            lowerCAmelCase_ = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0 ),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0 ),
            )
        else:
            self._index_in_epoch += batch_size
            lowerCAmelCase_ = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(_A , '''Please write your own downloading logic.''' )
def __UpperCamelCase ( _A , _A , _A ):
    """Download a file into a work directory unless it already exists; return its path.

    NOTE(review): all three parameters share the name ``_A`` (a
    ``SyntaxError``), and the joined path is bound to a throwaway local while
    the return statement reads the undefined ``filepath``. Confirm against
    the original TensorFlow MNIST loader.
    """
    if not gfile.Exists(_A ):
        gfile.MakeDirs(_A )
    lowerCAmelCase_ = os.path.join(_A , _A )
    if not gfile.Exists(_A ):
        urllib.request.urlretrieve(_A , _A ) # noqa: S310
    with gfile.GFile(_A ) as f:
        lowerCAmelCase_ = f.size()
        print('''Successfully downloaded''' , _A , _A , '''bytes.''' )
    return filepath
@deprecated(
    _A , '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' )
def __UpperCamelCase ( _A , _A=False , _A=False , _A=dtypes.floataa , _A=True , _A=5000 , _A=None , _A=DEFAULT_SOURCE_URL , ):
    """Download (if needed) and load the MNIST train/validation/test splits.

    NOTE(review): every parameter shares the name ``_A`` (a ``SyntaxError``)
    while the body reads ``fake_data``, ``source_url``, ``one_hot``,
    ``validation_size`` and others; most locals are bound to the throwaway
    name ``lowerCAmelCase_`` while later lines read ``train_images``,
    ``train_labels`` etc., and ``DEFAULT_SOURCE_URL``/``_Datasets``/
    ``_maybe_download``/``_extract_images``/``_extract_labels`` are not
    defined under those names in this file. Reconstruct from the original
    TensorFlow MNIST loader before use.
    """
    if fake_data:
        def fake():
            return _DataSet(
                [] , [] , fake_data=_A , one_hot=_A , dtype=_A , seed=_A )
        lowerCAmelCase_ = fake()
        lowerCAmelCase_ = fake()
        lowerCAmelCase_ = fake()
        return _Datasets(train=_A , validation=_A , test=_A )
    if not source_url: # empty string check
        lowerCAmelCase_ = DEFAULT_SOURCE_URL
    lowerCAmelCase_ = '''train-images-idx3-ubyte.gz'''
    lowerCAmelCase_ = '''train-labels-idx1-ubyte.gz'''
    lowerCAmelCase_ = '''t10k-images-idx3-ubyte.gz'''
    lowerCAmelCase_ = '''t10k-labels-idx1-ubyte.gz'''
    lowerCAmelCase_ = _maybe_download(
        _A , _A , source_url + train_images_file )
    with gfile.Open(_A , '''rb''' ) as f:
        lowerCAmelCase_ = _extract_images(_A )
    lowerCAmelCase_ = _maybe_download(
        _A , _A , source_url + train_labels_file )
    with gfile.Open(_A , '''rb''' ) as f:
        lowerCAmelCase_ = _extract_labels(_A , one_hot=_A )
    lowerCAmelCase_ = _maybe_download(
        _A , _A , source_url + test_images_file )
    with gfile.Open(_A , '''rb''' ) as f:
        lowerCAmelCase_ = _extract_images(_A )
    lowerCAmelCase_ = _maybe_download(
        _A , _A , source_url + test_labels_file )
    with gfile.Open(_A , '''rb''' ) as f:
        lowerCAmelCase_ = _extract_labels(_A , one_hot=_A )
    if not 0 <= validation_size <= len(_A ):
        lowerCAmelCase_ = (
            '''Validation size should be between 0 and '''
            f"{len(_A )}. Received: {validation_size}."
        )
        raise ValueError(_A )
    lowerCAmelCase_ = train_images[:validation_size]
    lowerCAmelCase_ = train_labels[:validation_size]
    lowerCAmelCase_ = train_images[validation_size:]
    lowerCAmelCase_ = train_labels[validation_size:]
    lowerCAmelCase_ = {'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed}
    lowerCAmelCase_ = _DataSet(_A , _A , **_A )
    lowerCAmelCase_ = _DataSet(_A , _A , **_A )
    lowerCAmelCase_ = _DataSet(_A , _A , **_A )
    return _Datasets(train=_A , validation=_A , test=_A )
| 431 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure: submodule name -> public symbols. Heavy submodules
# are only imported on first attribute access via ``_LazyModule`` below.
# (Previously each optional symbol list was bound to a throwaway variable and
# the final ``_LazyModule`` call referenced an undefined ``_import_structure``
# without being installed into ``sys.modules``.)
_import_structure = {
    'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
    'tokenization_deberta': ['DebertaTokenizer'],
}

# Fast tokenizer requires the `tokenizers` library.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_deberta_fast'] = ['DebertaTokenizerFast']

# PyTorch models.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_deberta'] = [
        'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'DebertaForMaskedLM',
        'DebertaForQuestionAnswering',
        'DebertaForSequenceClassification',
        'DebertaForTokenClassification',
        'DebertaModel',
        'DebertaPreTrainedModel',
    ]

# TensorFlow models.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_deberta'] = [
        'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFDebertaForMaskedLM',
        'TFDebertaForQuestionAnswering',
        'TFDebertaForSequenceClassification',
        'TFDebertaForTokenClassification',
        'TFDebertaModel',
        'TFDebertaPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
    from .tokenization_deberta import DebertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_deberta_fast import DebertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deberta import (
            DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            DebertaForMaskedLM,
            DebertaForQuestionAnswering,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaModel,
            DebertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deberta import (
            TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDebertaForMaskedLM,
            TFDebertaForQuestionAnswering,
            TFDebertaForSequenceClassification,
            TFDebertaForTokenClassification,
            TFDebertaModel,
            TFDebertaPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 265 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Lazy import structure: submodule name -> public symbols. Heavy submodules
# are only imported on first attribute access via ``_LazyModule`` below.
# (Previously each optional symbol list was bound to a throwaway variable,
# the final ``_LazyModule`` call referenced an undefined ``_import_structure``
# without being installed into ``sys.modules``, and the TYPE_CHECKING branch
# imported garbled ``layoutlmva`` module/class names that contradict the
# symbol strings above.)
_import_structure = {
    'configuration_layoutlmv3': [
        'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'LayoutLMv3Config',
        'LayoutLMv3OnnxConfig',
    ],
    'processing_layoutlmv3': ['LayoutLMv3Processor'],
    'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'],
}

# Fast tokenizer requires the `tokenizers` library.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_layoutlmv3_fast'] = ['LayoutLMv3TokenizerFast']

# PyTorch models.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_layoutlmv3'] = [
        'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
        'LayoutLMv3ForQuestionAnswering',
        'LayoutLMv3ForSequenceClassification',
        'LayoutLMv3ForTokenClassification',
        'LayoutLMv3Model',
        'LayoutLMv3PreTrainedModel',
    ]

# TensorFlow models.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_layoutlmv3'] = [
        'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFLayoutLMv3ForQuestionAnswering',
        'TFLayoutLMv3ForSequenceClassification',
        'TFLayoutLMv3ForTokenClassification',
        'TFLayoutLMv3Model',
        'TFLayoutLMv3PreTrainedModel',
    ]

# Image processing requires the vision extras.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_layoutlmv3'] = ['LayoutLMv3FeatureExtractor']
    _import_structure['image_processing_layoutlmv3'] = ['LayoutLMv3ImageProcessor']

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 265 | 1 |
"""simple docstring"""
# flake8: noqa
# Lint as: python3
SCREAMING_SNAKE_CASE__ : Any =[
'VerificationMode',
'Version',
'disable_progress_bar',
'enable_progress_bar',
'is_progress_bar_enabled',
'experimental',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 434 | """simple docstring"""
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    """Check self-hosted GitHub Actions runners and fail loudly if any is offline.

    Args:
        target_runners: iterable of runner names to check.
        token: GitHub token with ``actions:read`` permission.

    Side effects:
        Writes the offline runners as JSON to ``offline_runners.txt`` so the
        result can be reported on Slack.

    Raises:
        ValueError: if at least one of the target runners is offline.
    """
    offline_runners = []
    # `token` comes from trusted CI configuration, so interpolating it into a
    # shell command is acceptable for this internal tool.
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    status = json.loads(output.stdout.decode("utf-8"))
    for runner in status["runners"]:
        if runner["name"] in target_runners and runner["status"] == "offline":
            offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")


def list_str(values):
    """argparse `type=` helper: split a comma-separated string into a list."""
    return values.split(",")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--target_runners",
        default=None,
        type=list_str,
        required=True,
        help="Comma-separated list of runners to check status.",
    )
    parser.add_argument(
        "--token", default=None, type=str, required=True, help="A token that has actions:read permission."
    )
    args = parser.parse_args()
    get_runner_status(args.target_runners, args.token)
| 434 | 1 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
snake_case_ = logging.get_logger(__name__)
@add_end_docstrings(_UpperCAmelCase )
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    """Zero-shot image classification pipeline.

    Scores an image against free-form candidate labels by embedding the image
    and one hypothesis sentence per label, then softmaxing the image-text logits.
    The four Pipeline hooks below previously all shared the name ``a`` and
    shadowed each other; their canonical names are restored.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        """Classify the image(s) against `candidate_labels` given as keyword arguments."""
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        # Route user-facing kwargs to the preprocess step; forward/postprocess take none.
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        # Encode the image once and one hypothesis sentence per candidate label.
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        # fix: the isinstance check previously tested against the method's own
        # parameter; `UserDict` (a BatchEncoding base) is the intended type.
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        # fix: the sort key's lambda previously referenced undefined `x`; bind it
        # as the parameter so labels are ordered by descending score.
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
| 704 |
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    """Remap an LDM VAE state dict to the diffusers `AutoencoderKL` layout.

    Args:
        checkpoint: the original CompVis-style VAE state dict.
        config: the diffusers VAE config (from `create_vae_diffusers_config`).

    Returns:
        A new state dict keyed with diffusers parameter names.

    NOTE(review): in the obfuscated original every local rebound one name and the
    function returned an undefined `new_checkpoint`; names are restored from the
    upstream diffusers conversion script.
    """
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        # Decoder blocks are stored deepest-first in the LDM checkpoint.
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint


def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    """Convert a Stable Diffusion v1 `VAE.pt` (or safetensors) file to a diffusers VAE."""
    # Only support V1
    r = requests.get(
        " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    args = parser.parse_args()

    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 388 | 0 |
from collections import defaultdict
def dfs(start: int) -> int:
    """Return the size of the subtree rooted at *start*.

    Side effects: marks nodes in the module-level ``visited`` dict and appends
    every node whose subtree has an even number of vertices to ``cuts`` (the
    edge above such a node can be removed while keeping all components even).
    """
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree():
    """Run the DFS from the root (node 1); afterwards ``len(cuts) - 1`` is the answer."""
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
| 410 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
UpperCAmelCase__ : str = logging.get_logger(__name__)
# DPR tokenizer resources. The constant names below are the ones the tokenizer
# classes in this file actually reference; previously every constant was bound
# to one repeatedly-clobbered name, leaving all of them undefined.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}

CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}

CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class UpperCamelCase_ ( __UpperCamelCase ):
    """DPR context-encoder tokenizer (a BERT tokenizer with DPR vocab resources).

    fix: the four class attributes previously all rebound a single name, losing
    three of them; the slow-tokenizer attribute names read by the tokenizer
    machinery are restored.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class UpperCamelCase_ ( __UpperCamelCase ):
    """DPR question-encoder tokenizer (a BERT tokenizer with DPR vocab resources).

    fix: the four class attributes previously all rebound a single name, losing
    three of them; the slow-tokenizer attribute names read by the tokenizer
    machinery are restored.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
# Result records for the reader: `DPRSpanPrediction` is constructed in
# `decode_best_spans` below, so these must carry their canonical names.
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
# Docstring injected into the reader tokenizer's __call__ via @add_start_docstrings.
# (Renamed from a throwaway identifier; stray backslash-escapes inside the raw
# string are also removed.)
CUSTOM_DPR_READER_DOCSTRING = r"""
    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
    with the format:

    ```
    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
    ```

    Args:
        questions (`str` or `List[str]`):
            The questions to be encoded. You can specify one question for many passages. In this case, the question
            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
            `titles` or `texts`.
        titles (`str` or `List[str]`):
            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
        texts (`str` or `List[str]`):
            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
            Activates and controls padding. Accepts the following values:

            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
            Activates and controls truncation. Accepts the following values:

            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
              the maximum acceptable input length for the model if that argument is not provided. This will truncate
              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
              of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the first
              sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
              greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
            Controls the maximum length to use by one of the truncation/padding parameters.

            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
            is required by one of the truncation/padding parameters. If the model has no specific maximum input
            length (like XLNet) truncation/padding to a maximum length will be deactivated.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
            If set, will return tensors instead of list of python integers. Acceptable values are:

            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.

            [What are attention masks?](../glossary#attention-mask)

    Returns:
        `Dict[str, List[List[int]]]`: A dictionary with the following keys:

        - `input_ids`: List of token ids to be fed to a model.
        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
    """
@add_start_docstrings(__UpperCamelCase )
class UpperCamelCase_ :
    """Mixin adding DPR-reader encoding and answer-span decoding to a BERT tokenizer.

    fix: `decode_best_spans` and `_get_best_spans` previously shared one method
    name (the second shadowed the first, and `self._get_best_spans` was
    undefined); the sort key lambda also referenced an undefined name.
    """

    def __call__(
        self,
        questions,
        titles = None,
        texts = None,
        padding = False,
        truncation = False,
        max_length = None,
        return_tensors = None,
        return_attention_mask = None,
        **kwargs,
    ) -> BatchEncoding:
        """Encode question/title/text triples as `(n_passages, seq_len)` input ids."""
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        # One question may be shared across all passages.
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                # 1 for real tokens, 0 for padding positions
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input,
        reader_output,
        num_spans = 16,
        max_answer_length = 64,
        num_spans_per_passage = 4,
    ) -> List[DPRSpanPrediction]:
        """Select the best answer spans across passages, ordered by relevance."""
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits,
        end_logits,
        max_answer_length,
        top_spans,
    ) -> List[DPRSpanPrediction]:
        """Find the `top_spans` non-overlapping spans with the highest start+end score."""
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        # fix: the lambda previously referenced undefined `x`
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            # Skip any span that overlaps an already-chosen one.
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(__UpperCamelCase )
class UpperCamelCase_ ( __UpperCamelCase , __UpperCamelCase ):
    """DPR reader tokenizer: the span-decoding mixin combined with a BERT tokenizer.

    fix: the five class attributes previously all rebound a single name, losing
    four of them; the tokenizer attribute names read by `PreTrainedTokenizer`
    are restored.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
| 410 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    """Immutable-by-convention scheduler state threaded through the Flax scheduler calls.

    fix: the three fields previously shared one name (losing two); names restored
    from the `state.replace(num_inference_steps=..., schedule=..., timesteps=...)`
    call in `set_timesteps` below.
    """

    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        """Return an empty state; fields are filled in by `set_timesteps`."""
        return cls()
@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    """Output of a Flax KarrasVe scheduler step.

    fix: the three fields previously shared one name; names restored from the
    `FlaxKarrasVeOutput(prev_sample=..., derivative=..., state=...)` call sites.
    """

    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState
class snake_case__ ( FlaxSchedulerMixin, ConfigMixin ):
    """Flax variance-expanding stochastic sampler (Karras et al., 2022).

    fix: the original declaration listed the same (obfuscated) base class twice,
    which raises `TypeError: duplicate base class`; the two intended mixins from
    this module's imports are used. The seven methods previously all shared the
    name `a__` and shadowed each other; their scheduler-API names are restored.
    """

    @property
    def has_state(self):
        # Flax schedulers carry explicit state objects instead of module attributes.
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # All hyper-parameters are stored on `self.config` by @register_to_config.
        pass

    def create_state(self):
        """Return a fresh, empty scheduler state."""
        return KarrasVeSchedulerState.create()

    def set_timesteps(self, state, num_inference_steps, shape = ()):
        """Fill `state` with the reversed timestep ladder and the sigma schedule."""
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]

        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(self, state, sample, sigma, key):
        """Stochastic churn: lift the sample to a higher noise level `sigma_hat`."""
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(self, state, model_output, sigma_hat, sigma_prev, sample_hat, return_dict = True):
        """First-order (Euler) prediction step from sigma_hat down to sigma_prev."""
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(
        self, state, model_output, sigma_hat, sigma_prev, sample_hat, sample_prev, derivative, return_dict = True
    ):
        """Second-order correction of a previous `step` output (Heun-style average)."""
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state, original_samples, noise, timesteps):
        # Not supported for this scheduler.
        raise NotImplementedError()
| 715 | """simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    """Builds DetaImageProcessor kwargs and expected output sizes for the tests below.

    fix: the original `__init__` declared thirteen parameters all with the same
    name (a SyntaxError), the two methods shared one name, and the `max()` key
    lambdas referenced an undefined name. The class name is restored from its
    use in the test class below.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Return the kwargs used to instantiate a DetaImageProcessor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the (height, width) the image processor should produce.

        Mirrors shortest-edge resizing for a single image; for a batch, takes the
        per-image expectations and pads to the max height/width in the batch.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            # fix: lambdas previously referenced undefined `item`
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for DetaImageProcessor: resizing/normalization of PIL/NumPy/PyTorch
    inputs and conversion of COCO detection/panoptic annotations."""

    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}
        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 67 | 0 |
"""simple docstring"""
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester(unittest.TestCase):
    """Minimal helper holding shared state for MarkupLM feature-extraction tests."""

    def __init__(self, parent):
        # keep a handle on the owning test case so helpers can use its assertions
        self.parent = parent

    def prepare_feat_extract_dict(self):
        # MarkupLMFeatureExtractor takes no configurable kwargs, so this is empty
        return {}
def get_html_strings():
    """Return two distinct fixture HTML documents used by the tests below."""
    html_string_1 = "<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR=\"FFFFFF\">\n <HR>\n <a href=\"http://google.com\">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style=\"color:#0000FF\">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>"
    html_string_2 = "\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n "
    # the original returned an undefined name twice; return both fixtures
    return [html_string_1, html_string_2]
@require_bsa
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    """End-to-end test of MarkupLMFeatureExtractor's node/xpath extraction."""

    # class under test; None when the bs4 backend is unavailable so the mixin skips
    feature_extraction_class = MarkupLMFeatureExtractor if is_bsa_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()
        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)
        # fmt: off
        expected_nodes = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
        expected_xpaths = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
        # fmt: on
        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)
        # fmt: off
        expected_nodes = expected_nodes + [["My First Heading", "My first paragraph."]]
        expected_xpaths = expected_xpaths + [["/html/body/h1", "/html/body/p"]]
        # fmt: on
        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)
        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
| 95 |
'''simple docstring'''
import csv
import tweepy
# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    """Download up to ~3200 of `screen_name`'s most recent tweets and save to CSV.

    Writes `new_<screen_name>_tweets.csv` with columns id, created_at, text.
    The module-level Twitter API credentials must be filled in first.
    """
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    alltweets.extend(new_tweets)
    # keep grabbing tweets until there are no tweets left to grab; computing
    # `oldest` inside the loop also avoids an IndexError for empty timelines
    while len(new_tweets) > 0:
        # save the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"getting tweets before {oldest}")
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)
        alltweets.extend(new_tweets)
        print(f"...{len(alltweets)} tweets downloaded so far")
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)
if __name__ == "__main__":
    # Script entry point: runs only when executed directly, not when imported.
    # pass in the username of the account you want to download
    get_all_tweets('''FirePing32''')
| 288 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    # fallback so the `@unittest.skipIf` guards below still evaluate without torch
    is_torch_greater_or_equal_than_1_11 = False

if is_vision_available():
    from PIL import Image

    from transformers import PixaStructImageProcessor
class PixaStructImageProcessingTester(unittest.TestCase):
    """Shared configuration and fixtures for Pix2Struct image processor tests."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        # patch counts exercised by each test loop
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        """Fetch a fixed sample image (used by the expected-patches regression test)."""
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""",
)
@require_torch
@require_vision
class PixaStructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for Pix2Struct's patch extraction over PIL/NumPy/PyTorch inputs."""

    image_processing_class = PixaStructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PixaStructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048
        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )
            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input: VQA mode requires header_text
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches
            header_text = "Hello"
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=header_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )
            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=header_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )
            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )
            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""",
)
@require_torch
@require_vision
class PixaStructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Same processor, but 4-channel (RGBA) inputs that get converted down to 3 channels."""

    image_processing_class = PixaStructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PixaStructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input: the alpha channel is dropped, hence num_channels - 1
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )
            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
| 31 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_nllb import NllbTokenizer
else:
    # the slow tokenizer class is unavailable without sentencepiece
    NllbTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-large-en-ro": 1024,
    "facebook/nllb-200-distilled-600M": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn",
                          "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
# fmt: on
class __UpperCamelCase ( a__ ):
lowerCamelCase : Optional[Any] =VOCAB_FILES_NAMES
lowerCamelCase : str =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Dict =PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : List[Any] =["""input_ids""", """attention_mask"""]
lowerCamelCase : Union[str, Any] =NllbTokenizer
lowerCamelCase : List[int] =[]
lowerCamelCase : List[int] =[]
def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<mask>" , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=False , **lowerCAmelCase__ , ) -> Optional[Any]:
# Mask token behave like a normal word, i.e. include the space before it
a : Dict = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
a : Optional[Any] = legacy_behaviour
super().__init__(
vocab_file=lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , src_lang=lowerCAmelCase__ , tgt_lang=lowerCAmelCase__ , additional_special_tokens=lowerCAmelCase__ , legacy_behaviour=lowerCAmelCase__ , **lowerCAmelCase__ , )
a : int = vocab_file
a : Any = False if not self.vocab_file else True
a : List[str] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
a : str = {
lang_code: self.convert_tokens_to_ids(lowerCAmelCase__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
a : List[Any] = src_lang if src_lang is not None else "eng_Latn"
a : str = self.convert_tokens_to_ids(self._src_lang )
a : Any = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __a ( self ) -> str:
return self._src_lang
@src_lang.setter
def __a ( self , lowerCAmelCase__ ) -> None:
a : List[str] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
a : str = [self.sep_token_id]
a : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ) -> Any:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
a : Dict = src_lang
a : int = self(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ )
a : Dict = self.convert_tokens_to_ids(lowerCAmelCase__ )
a : Any = tgt_lang_id
return inputs
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = "eng_Latn" , lowerCAmelCase__ = None , lowerCAmelCase__ = "fra_Latn" , **lowerCAmelCase__ , ) -> BatchEncoding:
a : Optional[int] = src_lang
a : int = tgt_lang
return super().prepare_seqaseq_batch(lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ )
def __a ( self ) -> Tuple:
return self.set_src_lang_special_tokens(self.src_lang )
    def __a ( self ) -> str:
        """Switch the tokenizer into target-language mode."""
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def __a ( self , lowerCAmelCase__ ) -> None:
        """Reset prefix/suffix special tokens for the given source language.

        In legacy behaviour the language code follows EOS in the suffix;
        otherwise the language code is the prefix and EOS the suffix.
        """
        # NOTE(review): `self.cur_lang_code`, `self.prefix_tokens`,
        # `self.suffix_tokens` and `prefix_tokens_str`/`suffix_tokens_str` are never
        # bound here -- each mangled `a = ...` presumably assigned one of those, and
        # the TemplateProcessing result was presumably assigned to the backend
        # tokenizer's post_processor.  TODO restore from upstream.
        a : int = self.convert_tokens_to_ids(lowerCAmelCase__ )
        if self.legacy_behaviour:
            a : Tuple = []
            a : List[str] = [self.eos_token_id, self.cur_lang_code]
        else:
            a : int = [self.cur_lang_code]
            a : int = [self.eos_token_id]
        a : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
        a : Any = self.convert_ids_to_tokens(self.suffix_tokens )
        a : Any = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def __a ( self , lowerCAmelCase__ ) -> None:
        """Reset prefix/suffix special tokens for the given target language (mirror of the source variant)."""
        # NOTE(review): same mangling damage as the source-language variant above:
        # `self.cur_lang_code`, `self.prefix_tokens`, `self.suffix_tokens` and the
        # `*_tokens_str` locals are unbound, and the TemplateProcessing result is
        # discarded instead of being installed as the post_processor.
        a : str = self.convert_tokens_to_ids(lowerCAmelCase__ )
        if self.legacy_behaviour:
            a : Optional[Any] = []
            a : int = [self.eos_token_id, self.cur_lang_code]
        else:
            a : List[Any] = [self.cur_lang_code]
            a : List[Any] = [self.eos_token_id]
        a : int = self.convert_ids_to_tokens(self.prefix_tokens )
        a : int = self.convert_ids_to_tokens(self.suffix_tokens )
        a : Any = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
        """Copy the slow-tokenizer vocabulary file into the given save directory."""
        # NOTE(review): `save_directory`, `filename_prefix` and `out_vocab_file` are
        # the pre-mangling names of the two parameters and the `a` local below; as
        # written the f-string and the return reference unbound names.
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(lowerCAmelCase__ ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
            return
        a : Any = os.path.join(
            lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ):
            copyfile(self.vocab_file , lowerCAmelCase__ )
        return (out_vocab_file,)
| 31 | 1 |
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
__magic_name__ : Tuple = logging.get_logger(__name__)
__magic_name__ : Dict = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
__magic_name__ : Optional[Any] = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
    '''Walk `key` ("a.b.c") into the HF model, shape-check the source tensor
    against the target slot, and copy it in.'''
    # NOTE(review): all five parameters share one name (a SyntaxError), and the
    # `_snake_case = ...` lines lost their original targets (`hf_pointer`,
    # `hf_shape`, `hf_pointer.weight.data`, ...) -- restore from upstream.
    for attribute in key.split("." ):
        _snake_case = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    if weight_type is not None:
        _snake_case = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).shape
    else:
        _snake_case = hf_pointer.shape
    # NOTE(review): `assert` is stripped under `python -O`; an explicit raise
    # would be safer for a conversion-integrity check.
    assert hf_shape == value.shape, (
        f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
        f''' {value.shape} for {full_name}'''
    )
    if weight_type == "weight":
        _snake_case = value
    elif weight_type == "weight_g":
        _snake_case = value
    elif weight_type == "weight_v":
        _snake_case = value
    elif weight_type == "bias":
        _snake_case = value
    else:
        _snake_case = value
    logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
    '''Map every tensor in the fairseq WavLM state dict onto the HF model, via the
    conv feature extractor or the MAPPING table; warn about unmatched names.'''
    # NOTE(review): duplicate parameter names (SyntaxError); the `_snake_case`
    # targets were `unused_weights`, `fairseq_dict`, `feature_extractor`,
    # `is_used`, `layer_index`, `mapped_key` and `weight_type` upstream, and
    # `load_conv_layer`/`set_recursively` are the sibling functions that mangling
    # renamed to `snake_case_`.
    _snake_case = []
    _snake_case = fairseq_model.state_dict()
    _snake_case = hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        _snake_case = False
        if "conv_layers" in name:
            load_conv_layer(
                SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , hf_model.config.feat_extract_norm == "group" , )
            _snake_case = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
                    _snake_case = True
                    if "*" in mapped_key:
                        _snake_case = name.split(SCREAMING_SNAKE_CASE__ )[0].split("." )[-2]
                        _snake_case = mapped_key.replace("*" , SCREAMING_SNAKE_CASE__ )
                    if "weight_g" in name:
                        _snake_case = "weight_g"
                    elif "weight_v" in name:
                        _snake_case = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        _snake_case = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        _snake_case = "weight"
                    else:
                        _snake_case = None
                    set_recursively(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
                continue
        if not is_used:
            unused_weights.append(SCREAMING_SNAKE_CASE__ )
    logger.warning(f'''Unused weights: {unused_weights}''' )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
    '''Copy one fairseq conv-feature-extractor tensor (conv weight/bias, type 0,
    or layer-norm weight/bias, type 2) into the matching HF conv layer.'''
    # NOTE(review): duplicate parameter names (SyntaxError); the `_snake_case`
    # targets were `name`, `items`, `layer_id`, `type_id` and the `.data`
    # assignments upstream -- as written the copies are discarded.
    _snake_case = full_name.split("conv_layers." )[-1]
    _snake_case = name.split("." )
    _snake_case = int(items[0] )
    _snake_case = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            _snake_case = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            _snake_case = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            _snake_case = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            _snake_case = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(SCREAMING_SNAKE_CASE__ )
@torch.no_grad()
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ):
    '''Load the fairseq WavLM checkpoint, build the (optionally configured) HF
    model, port the weights and save the result.'''
    # NOTE(review): duplicate parameter names (SyntaxError); the `_snake_case`
    # targets were `checkpoint`, `cfg`, `model`, `config` and `hf_wavlm` upstream;
    # `recursively_load_weights` is the sibling function renamed by mangling.
    _snake_case = torch.load(SCREAMING_SNAKE_CASE__ )
    _snake_case = WavLMConfigOrig(checkpoint["cfg"] )
    _snake_case = WavLMOrig(SCREAMING_SNAKE_CASE__ )
    model.load_state_dict(checkpoint["model"] )
    model.eval()
    if config_path is not None:
        _snake_case = WavLMConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
    else:
        _snake_case = WavLMConfig()
    _snake_case = WavLMModel(SCREAMING_SNAKE_CASE__ )
    recursively_load_weights(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    hf_wavlm.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
    # CLI entry point for the fairseq -> Transformers WavLM conversion.
    # NOTE(review): `parser`/`args` were the original assignment targets, and
    # `convert_wavlm_checkpoint` was renamed to `snake_case_` by mangling -- as
    # written every line after the first raises NameError.
    __magic_name__ : Union[str, Any] = argparse.ArgumentParser()
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    __magic_name__ : str = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 672 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
    [
        {
            '''framework''': '''pytorch''',
            '''script''': '''run_glue.py''',
            '''model_name_or_path''': '''distilbert-base-cased''',
            '''instance_type''': '''ml.g4dn.xlarge''',
            '''results''': {'''train_runtime''': 650, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9},
        },
        {
            '''framework''': '''tensorflow''',
            '''script''': '''run_tf.py''',
            '''model_name_or_path''': '''distilbert-base-cased''',
            '''instance_type''': '''ml.g4dn.xlarge''',
            '''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9},
        },
    ] )
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    '''Single-node SageMaker training smoke test: run the parameterized example
    script on one instance and check runtime/accuracy/loss KPIs.'''

    def UpperCamelCase( self ):
        # Copy the example training script into the test workspace (PyTorch only).
        # NOTE(review): `check=lowerCamelCase` references a name this method has no
        # parameter for -- presumably `check=True` upstream.
        if self.framework == "pytorch":
            subprocess.run(
                F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="utf-8" , check=lowerCamelCase , )
        assert hasattr(self , "env" )
    def UpperCamelCase( self , lowerCamelCase=1 ):
        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F'''{self.env.base_job_name}-single''' , instance_count=lowerCamelCase , instance_type=self.instance_type , debugger_hook_config=lowerCamelCase , hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="py36" , )
    def UpperCamelCase( self , lowerCamelCase ):
        # Export the training-job metric history to CSV.
        # NOTE(review): `job_name` in the f-string is unbound -- the parameter was
        # renamed to `lowerCamelCase` by mangling.
        TrainingJobAnalytics(lowerCamelCase ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' )
    def UpperCamelCase( self ):
        # End-to-end: train, pull metrics, assert KPIs, dump results to JSON.
        # NOTE(review): `estimator`, `result_metrics_df`, `eval_accuracy`,
        # `eval_loss` and `train_runtime` were the original `_snake_case` targets;
        # every method here also shares the name `UpperCamelCase`, so only the
        # last definition survives on the class.
        # create estimator
        _snake_case = self.create_estimator()
        # run training
        estimator.fit()
        # result dataframe
        _snake_case = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        _snake_case = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
        _snake_case = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        _snake_case = (
            Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 999_999 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
        assert all(t <= self.results["eval_loss"] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(F'''{estimator.latest_training_job.name}.json''' , "w" ) as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , lowerCamelCase )
| 672 | 1 |
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
set_seed(770)
_lowerCAmelCase = {
"c_attn": "att_proj",
"c_proj": "out_proj",
"c_fc": "in_proj",
"transformer.": "",
"h.": "layers.",
"ln_1": "layernorm_1",
"ln_2": "layernorm_2",
"ln_f": "layernorm_final",
"wpe": "position_embeds_layer",
"wte": "input_embeds_layer",
}
_lowerCAmelCase = {
"text_small": {
"repo_id": "suno/bark",
"file_name": "text.pt",
},
"coarse_small": {
"repo_id": "suno/bark",
"file_name": "coarse.pt",
},
"fine_small": {
"repo_id": "suno/bark",
"file_name": "fine.pt",
},
"text": {
"repo_id": "suno/bark",
"file_name": "text_2.pt",
},
"coarse": {
"repo_id": "suno/bark",
"file_name": "coarse_2.pt",
},
"fine": {
"repo_id": "suno/bark",
"file_name": "fine_2.pt",
},
}
_lowerCAmelCase = os.path.dirname(os.path.abspath(__file__))
_lowerCAmelCase = os.path.join(os.path.expanduser("""~"""), """.cache""")
_lowerCAmelCase = os.path.join(os.getenv("""XDG_CACHE_HOME""", default_cache_dir), """suno""", """bark_v0""")
def lowercase ( model_type ,use_small=False ) -> str:
    """Return the local cache path of a Bark sub-model checkpoint.

    Args:
        model_type: one of "text", "coarse", "fine".
        use_small: resolve the "<model_type>_small" variant when True.
    """
    # Fixes the mangled original: both parameters were named `_a` (a SyntaxError)
    # while the body read the undefined `model_type`, `use_small` and `key`.
    # The join base is presumably CACHE_DIR (the directory `_download` below
    # populates) -- confirm against the upstream Bark conversion script.
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR ,REMOTE_MODEL_PATHS[key]["file_name"] )
def lowercase ( repo_id ,file_name ) -> None:
    """Download `file_name` from the Hugging Face Hub repo `repo_id` into CACHE_DIR."""
    # Fixes the mangled original: both parameters were named `_a` (a SyntaxError)
    # and the call arguments were undefined.  Target directory presumed to be
    # CACHE_DIR, as in the upstream Bark conversion script -- confirm.
    os.makedirs(CACHE_DIR ,exist_ok=True )
    hf_hub_download(repo_id=repo_id ,filename=file_name ,local_dir=CACHE_DIR )
def lowercase ( _a ,_a ,_a=False ,_a="text" ) -> str:
    # Build the HF Bark sub-model for `model_type`: pick the model/config classes,
    # load the suno/bark checkpoint (downloading if missing), remap state-dict
    # keys via `new_layer_name_dict`, verify key coverage, and return the model.
    # NOTE(review): all four parameters share the name `_a` (a SyntaxError) and
    # the `UpperCAmelCase_` bindings lost their targets (`ModelClass`,
    # `ConfigClass`, `GenerationConfigClass`, `model_key`, `model_info`,
    # `checkpoint`, `model_args`, `config`, `model`, `state_dict`, `new_k`,
    # `extra_keys`, `missing_keys`, `n_params`, `val_loss`) -- restore upstream.
    if model_type == "text":
        UpperCAmelCase_: List[Any] = BarkSemanticModel
        UpperCAmelCase_: str = BarkSemanticConfig
        UpperCAmelCase_: Union[str, Any] = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        UpperCAmelCase_: Union[str, Any] = BarkCoarseModel
        UpperCAmelCase_: List[str] = BarkCoarseConfig
        UpperCAmelCase_: int = BarkCoarseGenerationConfig
    elif model_type == "fine":
        UpperCAmelCase_: Tuple = BarkFineModel
        UpperCAmelCase_: int = BarkFineConfig
        UpperCAmelCase_: Optional[int] = BarkFineGenerationConfig
    else:
        raise NotImplementedError()
    UpperCAmelCase_: List[str] = f"{model_type}_small" if use_small else model_type
    UpperCAmelCase_: Optional[int] = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(UpperCAmelCase__ ):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`." )
        _download(model_info["repo_id"] ,model_info["file_name"] )
    UpperCAmelCase_: int = torch.load(UpperCAmelCase__ ,map_location=UpperCAmelCase__ )
    # this is a hack
    UpperCAmelCase_: int = checkpoint["""model_args"""]
    if "input_vocab_size" not in model_args:
        UpperCAmelCase_: Optional[Any] = model_args["""vocab_size"""]
        UpperCAmelCase_: Dict = model_args["""vocab_size"""]
        del model_args["vocab_size"]
    # convert Bark model arguments to HF Bark model arguments
    UpperCAmelCase_: Dict = model_args.pop("n_head" )
    UpperCAmelCase_: Optional[int] = model_args.pop("n_embd" )
    UpperCAmelCase_: Optional[Any] = model_args.pop("n_layer" )
    UpperCAmelCase_: Union[str, Any] = ConfigClass(**checkpoint["model_args"] )
    UpperCAmelCase_: List[str] = ModelClass(config=UpperCAmelCase__ )
    UpperCAmelCase_: int = GenerationConfigClass()
    UpperCAmelCase_: List[Any] = model_generation_config
    UpperCAmelCase_: str = checkpoint["""model"""]
    # fixup checkpoint
    UpperCAmelCase_: Any = """_orig_mod."""
    for k, v in list(state_dict.items() ):
        if k.startswith(UpperCAmelCase__ ):
            # replace part of the key with corresponding layer name in HF implementation
            UpperCAmelCase_: str = k[len(UpperCAmelCase__ ) :]
            for old_layer_name in new_layer_name_dict:
                UpperCAmelCase_: str = new_k.replace(UpperCAmelCase__ ,new_layer_name_dict[old_layer_name] )
            UpperCAmelCase_: Dict = state_dict.pop(UpperCAmelCase__ )
    UpperCAmelCase_: str = set(state_dict.keys() ) - set(model.state_dict().keys() )
    UpperCAmelCase_: str = {k for k in extra_keys if not k.endswith(".attn.bias" )}
    UpperCAmelCase_: List[Any] = set(model.state_dict().keys() ) - set(state_dict.keys() )
    UpperCAmelCase_: List[str] = {k for k in missing_keys if not k.endswith(".attn.bias" )}
    if len(UpperCAmelCase__ ) != 0:
        raise ValueError(f"extra keys found: {extra_keys}" )
    if len(UpperCAmelCase__ ) != 0:
        raise ValueError(f"missing keys: {missing_keys}" )
    model.load_state_dict(UpperCAmelCase__ ,strict=UpperCAmelCase__ )
    UpperCAmelCase_: Dict = model.num_parameters(exclude_embeddings=UpperCAmelCase__ )
    UpperCAmelCase_: Union[str, Any] = checkpoint["""best_val_loss"""].item()
    logger.info(f"model loaded: {round(n_params/1e6 ,1 )}M params, {round(UpperCAmelCase__ ,3 )} loss" )
    model.eval()
    model.to(UpperCAmelCase__ )
    del checkpoint, state_dict
    return model
def lowercase ( _a ,_a=False ,_a="text" ) -> str:
    # Convert one Bark sub-model: load both the HF port and the original suno/bark
    # model on CPU, compare parameter counts and last-step logits, then save the
    # HF model into `pytorch_dump_folder_path`.
    # NOTE(review): the parameters share the name `_a` (a SyntaxError) and the
    # `UpperCAmelCase_` bindings lost their targets (`device`, `ckpt_path`,
    # `model`, `bark_model`, `batch_size`, `sequence_length`, `vec`,
    # `output_old_model`, `output_new_model_total`, `output_new_model`,
    # `n_codes_total`) -- restore from upstream before use.
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()
    UpperCAmelCase_: str = """cpu"""  # do conversion on cpu
    UpperCAmelCase_: List[str] = _get_ckpt_path(UpperCAmelCase__ ,use_small=UpperCAmelCase__ )
    UpperCAmelCase_: Union[str, Any] = _load_model(UpperCAmelCase__ ,UpperCAmelCase__ ,model_type=UpperCAmelCase__ ,use_small=UpperCAmelCase__ )
    # load bark initial model
    UpperCAmelCase_: Union[str, Any] = _bark_load_model(UpperCAmelCase__ ,"cpu" ,model_type=UpperCAmelCase__ ,use_small=UpperCAmelCase__ )
    if model_type == "text":
        UpperCAmelCase_: List[Any] = bark_model["""model"""]
    if model.num_parameters(exclude_embeddings=UpperCAmelCase__ ) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters" )
    # check if same output as the bark model
    UpperCAmelCase_: str = 5
    UpperCAmelCase_: Any = 10
    if model_type in ["text", "coarse"]:
        UpperCAmelCase_: List[Any] = torch.randint(256 ,(batch_size, sequence_length) ,dtype=torch.int )
        UpperCAmelCase_: Tuple = bark_model(UpperCAmelCase__ )[0]
        UpperCAmelCase_: List[Any] = model(UpperCAmelCase__ )
        # take last logits
        UpperCAmelCase_: Dict = output_new_model_total.logits[:, [-1], :]
    else:
        UpperCAmelCase_: Dict = 3
        UpperCAmelCase_: Dict = 8
        UpperCAmelCase_: Optional[Any] = torch.randint(256 ,(batch_size, sequence_length, n_codes_total) ,dtype=torch.int )
        UpperCAmelCase_: int = model(UpperCAmelCase__ ,UpperCAmelCase__ )
        UpperCAmelCase_: Tuple = bark_model(UpperCAmelCase__ ,UpperCAmelCase__ )
        UpperCAmelCase_: Optional[int] = output_new_model_total.logits
    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape" )
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal" )
    Path(UpperCAmelCase__ ).mkdir(exist_ok=UpperCAmelCase__ )
    model.save_pretrained(UpperCAmelCase__ )
def lowercase ( _a ,_a ,_a ,_a ,_a ,_a ,) -> List[str]:
    # Assemble the full BarkModel from the three converted sub-models plus the
    # EnCodec codec, wire up the combined config/generation-config, and push the
    # result to the Hub.
    # NOTE(review): all six parameters share the name `_a` (a SyntaxError) and the
    # `UpperCAmelCase_` bindings lost their targets (`pytorch_dump_folder_path`,
    # `semanticConfig`, `coarseAcousticConfig`, `fineAcousticConfig`,
    # `codecConfig`, `semantic`, `coarseAcoustic`, `fineAcoustic`, `codec`,
    # `bark_config`, `bark_generation_config`, `bark`, and the `bark.*` attribute
    # assignments) -- restore from upstream before use.
    UpperCAmelCase_: Dict = os.path.join(UpperCAmelCase__ ,UpperCAmelCase__ )
    UpperCAmelCase_: str = BarkSemanticConfig.from_pretrained(os.path.join(UpperCAmelCase__ ,"config.json" ) )
    UpperCAmelCase_: Optional[Any] = BarkCoarseConfig.from_pretrained(os.path.join(UpperCAmelCase__ ,"config.json" ) )
    UpperCAmelCase_: List[Any] = BarkFineConfig.from_pretrained(os.path.join(UpperCAmelCase__ ,"config.json" ) )
    UpperCAmelCase_: Any = EncodecConfig.from_pretrained("facebook/encodec_24khz" )
    UpperCAmelCase_: List[str] = BarkSemanticModel.from_pretrained(UpperCAmelCase__ )
    UpperCAmelCase_: str = BarkCoarseModel.from_pretrained(UpperCAmelCase__ )
    UpperCAmelCase_: List[Any] = BarkFineModel.from_pretrained(UpperCAmelCase__ )
    UpperCAmelCase_: Tuple = EncodecModel.from_pretrained("facebook/encodec_24khz" )
    UpperCAmelCase_: Union[str, Any] = BarkConfig.from_sub_model_configs(
        UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ )
    UpperCAmelCase_: List[Any] = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config ,coarseAcoustic.generation_config ,fineAcoustic.generation_config )
    UpperCAmelCase_: Union[str, Any] = BarkModel(UpperCAmelCase__ )
    UpperCAmelCase_: str = semantic
    UpperCAmelCase_: Optional[int] = coarseAcoustic
    UpperCAmelCase_: Tuple = fineAcoustic
    UpperCAmelCase_: Union[str, Any] = codec
    UpperCAmelCase_: Tuple = bark_generation_config
    Path(UpperCAmelCase__ ).mkdir(exist_ok=UpperCAmelCase__ )
    bark.save_pretrained(UpperCAmelCase__ ,repo_id=UpperCAmelCase__ ,push_to_hub=UpperCAmelCase__ )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""model_type""", type=str, help="""text, coarse or fine.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--is_small""", action="""store_true""", help="""convert the small version instead of the large.""")
_lowerCAmelCase = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small) | 708 |
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase__ ( snake_case__ , unittest.TestCase ):
    # TransfoXLTokenizer tests: vocab round-trips, lower/upper casing, and
    # wikitext-style detokenization symbols ("@-@", "@,@", "@.@").
    # NOTE(review): the base `snake_case__` is an unresolved mangled name
    # (presumably TokenizerTesterMixin); every method is named `snake_case_`, so
    # later defs shadow earlier ones; several bindings are lost (`vocab_tokens`,
    # `self.vocab_file`, `tokenizer`, `original_len`).  Restore before use.
    snake_case_ = TransfoXLTokenizer
    snake_case_ = False
    snake_case_ = False
    def snake_case_ ( self ):
        """Write a small fixture vocabulary into the temp dir for the tests."""
        super().setUp()
        UpperCAmelCase_: Any = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        UpperCAmelCase_: str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
    def snake_case_ ( self , **A__ ):
        """Instantiate a tokenizer from the fixture directory (lower_case forced on upstream)."""
        UpperCAmelCase_: Optional[Any] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **A__ )
    def snake_case_ ( self , A__ ):
        """Return an (input_text, expected_output_text) pair for round-trip checks."""
        UpperCAmelCase_: List[Any] = "<unk> UNwanted , running"
        UpperCAmelCase_: List[str] = "<unk> unwanted, running"
        return input_text, output_text
    def snake_case_ ( self ):
        """Lower-cased tokenization maps into the fixture vocab ids."""
        UpperCAmelCase_: Tuple = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=A__ )
        UpperCAmelCase_: Optional[int] = tokenizer.tokenize("<unk> UNwanted , running" )
        self.assertListEqual(A__ , ["<unk>", "unwanted", ",", "running"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(A__ ) , [0, 4, 8, 7] )
    def snake_case_ ( self ):
        """Whitespace is collapsed and text lower-cased when lower_case is on."""
        UpperCAmelCase_: Tuple = TransfoXLTokenizer(lower_case=A__ )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how  \n Are yoU ?  " ) , ["hello", "!", "how", "are", "you", "?"] )
    def snake_case_ ( self ):
        """Casing is preserved when lower_case is off."""
        UpperCAmelCase_: Any = TransfoXLTokenizer(lower_case=A__ )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how  \n Are yoU ?  " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
    def snake_case_ ( self ):
        """Wikitext-style number/hyphen symbols round-trip through tokenize/detokenize."""
        UpperCAmelCase_: str = TransfoXLTokenizer(lower_case=A__ )
        UpperCAmelCase_: Optional[Any] = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        UpperCAmelCase_: Optional[int] = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]
        self.assertListEqual(tokenizer.tokenize(A__ ) , A__ )
        self.assertEqual(tokenizer.convert_tokens_to_string(A__ ) , A__ )
    def snake_case_ ( self ):
        """`move_added_token` relocates a token without duplicating it."""
        UpperCAmelCase_: Dict = self.get_tokenizer()
        UpperCAmelCase_: Optional[Any] = len(A__ )
        tokenizer.add_tokens(["new1", "new2"] )
        tokenizer.move_added_token("new1" , 1 )
        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(A__ ) , original_len + 2 )
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1" ) , [1] )
        self.assertEqual(tokenizer.decode([1] ) , "new1" )
self.assertEqual(tokenizer.decode([1] ) , "new1" ) | 306 | 0 |
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
lowerCAmelCase_ = '\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n'
lowerCAmelCase_ = '\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n'
lowerCAmelCase_ = '\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for \'record\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'prediction_text\': the predicted answer text\n - for \'multirc\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question-answer pair as specified by the dataset\n - \'prediction\': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for \'record\': list of question-answers dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'answers\': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for \'record\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1\': F1 score\n - for \'multirc\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1_m\': Per-question macro-F1 score\n - \'f1_a\': Average F1 score over all answers\n - for \'axb\':\n \'matthews_correlation\': Matthew Correlation\n - for \'cb\':\n - \'accuracy\': Accuracy\n - \'f1\': F1 score\n - for all others:\n - \'accuracy\': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> 
print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')\n >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]\n >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')\n >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def snake_case( preds , labels ) -> float:
    """Return the accuracy of `preds` against `labels` (element-wise mean of equality over numpy arrays)."""
    # Fixes the mangled original: both parameters were named `__magic_name__`
    # (a SyntaxError) while the body compared the undefined `preds`/`labels`.
    return float((preds == labels).mean() )
def snake_case( preds , labels , fa_avg="binary" ) -> Dict:
    """Return accuracy and F1 of `preds` vs `labels`; `fa_avg` is sklearn's `average` mode."""
    # Fixes the mangled original: all three parameters shared one name (a
    # SyntaxError) and the results were bound to the throwaway `lowercase` while
    # the return dict read the undefined `acc`/`fa`.
    # NOTE(review): `simple_accuracy` is the accuracy helper defined above, which
    # mangling also renamed to `snake_case` -- restore its name before use.
    acc = simple_accuracy(preds , labels )
    fa = float(fa_score(y_true=labels , y_pred=preds , average=fa_avg ) )
    return {
        "accuracy": acc,
        "f1": fa,
    }
def snake_case( __magic_name__ , __magic_name__ ) -> Tuple:
    '''MultiRC metrics: per-question exact match (exact_match), per-question
    macro-F1 (f1_m) and F1 over all answers (f1_a).'''
    # NOTE(review): both parameters share one name (a SyntaxError), and the
    # `lowercase` bindings lost their targets (`question_map`, `question_id`,
    # `pred`, `fas`/`ems`, `fa_m`, `em`, `fa_a`) -- the body reads them unbound.
    # Restore from the upstream super_glue metric before use.
    lowercase : str = {}
    for id_pred, label in zip(__magic_name__ , __magic_name__ ):
        lowercase : Optional[Any] = F"""{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"""
        lowercase : Union[str, Any] = id_pred['''prediction''']
        if question_id in question_map:
            question_map[question_id].append((pred, label) )
        else:
            lowercase : List[Any] = [(pred, label)]
    lowercase , lowercase : List[str] = [], []
    for question, preds_labels in question_map.items():
        lowercase , lowercase : Union[str, Any] = zip(*__magic_name__ )
        lowercase : Dict = fa_score(y_true=__magic_name__ , y_pred=__magic_name__ , average='''macro''' )
        fas.append(__magic_name__ )
        lowercase : List[Any] = int(sum(pred == label for pred, label in preds_labels ) == len(__magic_name__ ) )
        ems.append(__magic_name__ )
    lowercase : str = float(sum(__magic_name__ ) / len(__magic_name__ ) )
    lowercase : List[Any] = sum(__magic_name__ ) / len(__magic_name__ )
    lowercase : List[Any] = float(fa_score(y_true=__magic_name__ , y_pred=[id_pred['''prediction'''] for id_pred in ids_preds] ) )
    return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
    # SuperGLUE metric: dispatches on `self.config_name` to accuracy,
    # accuracy+F1, Matthews correlation, ReCoRD EM/F1 or MultiRC EM/F1.
    # NOTE(review): all three methods are named `__a`, so only the last
    # definition survives on the class and `self._get_feature_types()` below
    # resolves to a mangled name -- restore the original method names
    # (`_info`, `_get_feature_types`, `_compute`) before use.
    def __a ( self : List[str] ) -> int:
        """Validate the configuration name and build the MetricInfo descriptor."""
        if self.config_name not in [
            "boolq",
            "cb",
            "copa",
            "multirc",
            "record",
            "rte",
            "wic",
            "wsc",
            "wsc.fixed",
            "axb",
            "axg",
        ]:
            raise KeyError(
                '''You should supply a configuration name selected in '''
                '''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None , )
    def __a ( self : Optional[int] ) -> Optional[int]:
        """Return the prediction/reference feature schema for the active configuration."""
        if self.config_name == "record":
            return {
                "predictions": {
                    "idx": {
                        "passage": datasets.Value('''int64''' ),
                        "query": datasets.Value('''int64''' ),
                    },
                    "prediction_text": datasets.Value('''string''' ),
                },
                "references": {
                    "idx": {
                        "passage": datasets.Value('''int64''' ),
                        "query": datasets.Value('''int64''' ),
                    },
                    "answers": datasets.Sequence(datasets.Value('''string''' ) ),
                },
            }
        elif self.config_name == "multirc":
            return {
                "predictions": {
                    "idx": {
                        "answer": datasets.Value('''int64''' ),
                        "paragraph": datasets.Value('''int64''' ),
                        "question": datasets.Value('''int64''' ),
                    },
                    "prediction": datasets.Value('''int64''' ),
                },
                "references": datasets.Value('''int64''' ),
            }
        else:
            return {
                "predictions": datasets.Value('''int64''' ),
                "references": datasets.Value('''int64''' ),
            }
    def __a ( self : Tuple , _A : Optional[Any] , _A : str ) -> List[str]:
        """Compute the configured metric from predictions and references.

        NOTE(review): the two value parameters are both annotated with the name
        `_A` (shadowing this class) -- presumably `predictions`/`references`
        before mangling; `evaluate_record`/`evaluate_multirc`/`simple_accuracy`
        /`acc_and_fa` are the helpers above/imported under mangled names.
        """
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(_A , _A )}
        elif self.config_name == "cb":
            return acc_and_fa(_A , _A , fa_avg='''macro''' )
        elif self.config_name == "record":
            lowercase : Tuple = [
                {
                    '''qas''': [
                        {'''id''': ref['''idx''']['''query'''], '''answers''': [{'''text''': ans} for ans in ref['''answers''']]}
                        for ref in references
                    ]
                }
            ]
            lowercase : Any = {pred['''idx''']['''query''']: pred['''prediction_text'''] for pred in predictions}
            return evaluate_record(_A , _A )[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(_A , _A )
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(_A , _A )}
        else:
            raise KeyError(
                '''You should supply a configuration name selected in '''
                '''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
import unittest
from knapsack import greedy_knapsack as kp
class _A ( unittest.TestCase ):
    """Tests for ``knapsack.greedy_knapsack.calc_profit``.

    NOTE(review): all methods were named ``__a`` (shadowing each other and
    invisible to unittest discovery), and every ``assertRaisesRegex`` call
    was passed only an exception and a pattern — it returned an unused
    context manager and asserted nothing.  The methods are restored to
    ``test_*`` names and the assertions now actually invoke ``calc_profit``.
    """

    def test_sorted(self) -> None:
        """A valid instance returns the expected maximal profit."""
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self) -> None:
        """A negative capacity is rejected."""
        self.assertRaisesRegex(
            ValueError,
            "max_weight must greater than zero.",
            kp.calc_profit,
            [10, 20, 30],
            [2, 4, 6],
            -15,
        )

    def test_negative_weight_value(self) -> None:
        """Any negative weight is rejected."""
        self.assertRaisesRegex(
            ValueError,
            "Weight can not be negative.",
            kp.calc_profit,
            [10, 20, 30],
            [2, -4, 6],
            100,
        )

    def test_negative_profit_value(self) -> None:
        """Any negative profit is rejected."""
        self.assertRaisesRegex(
            ValueError,
            "Profit can not be negative.",
            kp.calc_profit,
            [10, -20, 30],
            [2, 4, 6],
            100,
        )

    def test_null_max_weight(self) -> None:
        """A zero capacity is rejected just like a negative one."""
        self.assertRaisesRegex(
            ValueError,
            "max_weight must greater than zero.",
            kp.calc_profit,
            [10, 20, 30],
            [2, 4, 6],
            0,
        )

    def test_unequal_list_length(self) -> None:
        """profit and weight lists must have the same length."""
        self.assertRaisesRegex(
            ValueError,
            "The length of profit and weight must be same.",
            kp.calc_profit,
            [10, 20, 30, 40],
            [2, 4, 6],
            100,
        )


if __name__ == "__main__":
    unittest.main()
'''simple docstring'''
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def __snake_case ( ) -> None:
    """Check ``prisms_algorithm`` against a hand-computed MST of a 9-node graph.

    NOTE(review): the original body assigned every intermediate to one
    scrambled name while reading un-defined names (``edge``, ``result``, …)
    and inserted both adjacency entries under the same node; the wiring is
    restored here from those reads.
    """
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    # Build an undirected adjacency list: each edge appears in both directions.
    adjacency = defaultdict(list)
    for nodea, nodeb, cost in edges:
        adjacency[nodea].append([nodeb, cost])
        adjacency[nodeb].append([nodea, cost])
    result = mst(adjacency)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    # The MST is undirected, so accept either orientation of each edge.
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
| 720 |
'''simple docstring'''
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
a__ : int = get_logger(__name__)
class lowerCAmelCase__ :
    """Mock download manager that serves local/remote ``dummy_data.zip``
    fixtures instead of performing real dataset downloads.

    NOTE(review): method, attribute and parameter names were restored from an
    automated renaming that had collapsed every method onto ``__snake_case``
    (so they shadowed one another), duplicated the ``a__`` parameter names
    (a SyntaxError), and never actually assigned the ``self.*`` attributes
    the properties read.  Restored names follow the call sites visible in
    this class (``download_dummy_data``, ``dummy_zip_file``,
    ``create_dummy_data_*``, …).
    """

    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_local = False

    def __init__(
        self,
        dataset_name,
        config,
        version,
        cache_dir=None,
        use_local_dummy_data=False,
        load_existing_dummy_data=True,
        download_callbacks=None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded (lazily, see the `dummy_file` property)
        self._dummy_file = None
        self._bucket_url = None

    @property
    def dummy_file(self):
        """Download/extract the dummy data once and cache the local path."""
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        """Fetch the dummy zip (locally or from GitHub) and extract it."""
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])

    def download_and_extract(self, data_url, *args):
        """Dispatch to the dummy-data builder matching the shape of `data_url`."""
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}

    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict

    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        """Yield (relative posix path, open binary file) pairs below `path`."""

        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        """Yield file paths (skipping hidden/dunder entries), walking directories."""
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
| 570 | 0 |
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Tokenization tests for the Salesforce CodeGen tokenizers (slow + fast).

    NOTE(review): attribute/method names and local variables were restored
    from an automated renaming that had collapsed every method onto one
    identifier (shadowing each other), duplicated ``*args``/``**kwargs``
    parameter names (a SyntaxError), and read names (``kwargs``,
    ``_snake_case``) that were never defined.  Names follow the standard
    tokenizer-test contract used by the mixin base class.
    """

    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        """Write a tiny BPE vocab/merges pair into tmpdirname for the slow tokenizer."""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        """Slow tokenizer loaded from the fixture files written in setUp."""
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        """Fast tokenizer loaded from the fixture files written in setUp."""
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        # Mixing pretokenization with byte-level BPE is not supported here,
        # so the inherited check is deliberately disabled.
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Padding to max_length without a pad token must raise.
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )

    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])

    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

    @slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")

        text = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
        expected_truncated_text = "\nif len_a > len_b: result = a\nelse: result = b"

        input_ids = tokenizer.encode(text)
        truncate_before_pattern = ["^#", re.escape("<|endoftext|>"), "^'''", '^"""', "\n\n\n"]
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncate_before_pattern)
        self.assertEqual(decoded_text, expected_truncated_text)

    def test_padding_different_model_input_name(self):
        # The inherited padding check does not apply to this tokenizer setup.
        pass
| 431 | """simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class lowerCamelCase (_SCREAMING_SNAKE_CASE ):
    """Output dataclass for the text-to-video pipelines exported by this module.

    NOTE(review): the single field below was reduced to ``a = 42`` by an
    automated rewrite; presumably it held the generated video frames
    (e.g. a list of arrays or a tensor) — confirm against the upstream
    pipeline output definition before relying on it.
    """

    # Placeholder left by the automated rewrite; see class docstring.
    a = 42
# Import the real pipelines only when both transformers and torch are present;
# otherwise fall back to the dummy placeholder objects so that importing this
# package still succeeds without the optional dependencies.
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
    from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 159 | 0 |
import os
# Roman symbol values; restored to the name `SYMBOLS` that the functions below
# actually read (the scrambled module-level name is kept as an alias).
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
__lowerCamelCase = SYMBOLS  # backward-compatible alias for the old name


def parse_roman_numerals(numerals: str) -> int:
    """Convert a Roman numeral string (e.g. "XIV") to its integer value.

    Handles subtractive notation: a symbol smaller than its successor is
    subtracted, otherwise added.  Restored to the name the rest of this
    module calls (`parse_roman_numerals`); the scrambled definition also
    read the undefined name `numerals` for its parameter.
    """
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    # The last symbol is always added.
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals(num: int) -> str:
    """Generate the minimal-length Roman numeral for a positive integer.

    Works digit class by digit class (thousands, hundreds, tens, units),
    emitting the subtractive forms (CM/CD, XC/XL, IX/IV) where applicable.
    Restored to the name the rest of this module calls
    (`generate_roman_numerals`); the scrambled body assigned every local to
    one name while reading `numerals`/`num`/`*_count`, which were undefined.
    """
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Project Euler 89: characters saved by rewriting Roman numerals minimally.

    Reads one Roman numeral per line from the data file next to this script
    and sums len(original) - len(minimal form).  Restored to the name used by
    the ``__main__`` guard below (`solution`).
    """
    savings = 0
    # The data file lives next to this script, so anchor on __file__.
    # (The scrambled code took dirname() of the *filename argument*, which
    # produced a bogus absolute path.)
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)
    return savings


if __name__ == "__main__":
    print(F"""{solution() = }""")
| 704 | # coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
# NOTE(review): this assignment is a no-op as written; together with the
# `import os` above it was presumably meant to be
# os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" (silence TensorFlow C++ logging)
# — confirm before relying on it.
__lowerCamelCase : Tuple = '''3'''
# Core interpreter / library versions are always printed.
print('''Python version:''', sys.version)
print('''transformers version:''', transformers.__version__)
# Each heavy framework is probed independently so a missing one does not
# abort the report; ImportError falls back to printing None.
try:
    import torch
    print('''Torch version:''', torch.__version__)
    print('''Cuda available:''', torch.cuda.is_available())
    print('''Cuda version:''', torch.version.cuda)
    print('''CuDNN version:''', torch.backends.cudnn.version())
    print('''Number of GPUs available:''', torch.cuda.device_count())
    print('''NCCL version:''', torch.cuda.nccl.version())
except ImportError:
    print('''Torch version:''', None)
try:
    import deepspeed
    print('''DeepSpeed version:''', deepspeed.__version__)
except ImportError:
    print('''DeepSpeed version:''', None)
try:
    import tensorflow as tf
    print('''TensorFlow version:''', tf.__version__)
    print('''TF GPUs available:''', bool(tf.config.list_physical_devices('''GPU''')))
    print('''Number of TF GPUs available:''', len(tf.config.list_physical_devices('''GPU''')))
except ImportError:
    print('''TensorFlow version:''', None)
| 379 | 0 |
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('socket.socket' )
@patch('builtins.open' )
def __magic_name__ ( file , sock ) -> None:
    """Verify send_file drives the socket/file protocol end to end.

    ``file`` mocks ``builtins.open`` (innermost patch) and ``sock`` mocks
    ``socket.socket``.  Restored from a scrambled version whose two
    parameters shared one name (a SyntaxError) and whose mock wiring was
    assigned to dead locals; the names come from the assertions below.
    """
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    # First read returns data, second returns None to end the send loop.
    payloads = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(payloads)

    # ===== invoke =====
    send_file(filename='mytext.txt' , testing=True )

    # ===== ensurance =====
    sock.assert_called_once()
    sock.return_value.bind.assert_called_once()
    sock.return_value.listen.assert_called_once()
    sock.return_value.accept.assert_called_once()
    conn.recv.assert_called_once()
    file.return_value.__enter__.assert_called_once()
    file.return_value.__enter__.return_value.read.assert_called()
    conn.send.assert_called_once()
    conn.close.assert_called_once()
    sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()
| 66 |
'''simple docstring'''
from __future__ import annotations
# Number base used by the radix sort; restored to the name `RADIX` that the
# function body reads (the scrambled module-level name is kept as an alias).
RADIX = 10
_UpperCamelCase = RADIX  # backward-compatible alias for the old name


def a_(list_of_ints: list[int]) -> list[int]:
    """Sort non-negative integers in place with LSD radix sort.

    Returns the same (mutated) list, sorted ascending.  The parameter is
    restored to the name `list_of_ints` that the body reads; an empty input
    now returns immediately instead of raising on ``max([])``.
    """
    if not list_of_ints:  # nothing to sort; also avoids max() on an empty list
        return list_of_ints
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets by the current digit
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents back into list_of_ints
        write_index = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[write_index] = i
                write_index += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 459 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''',
}
class __snake_case ( _lowercase):
    """LayoutLMv3-style model configuration.

    NOTE(review): parameter names were restored from an automated renaming
    that had given every ``__init__`` parameter the same name (a
    duplicate-argument SyntaxError) and assigned all attributes to one dead
    local; values and order are unchanged.  This class and the ONNX config
    below both ended up named ``__snake_case``, so the second definition
    shadows this one at module level — they should get distinct names.
    """

    # PretrainedConfig registry key (was scrambled to `snake_case__`).
    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_ad_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_ad_pos_bins=64,
        max_rel_ad_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        """Standard transformer sizes plus 2-D/relative-position and visual
        patch options; extra kwargs are forwarded to the base config."""
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_ad_position_embeddings = max_ad_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_ad_pos_bins = rel_ad_pos_bins
        self.max_rel_ad_pos = max_rel_ad_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class __snake_case ( _lowercase):
    """ONNX export configuration for the LayoutLMv3-style model above.

    NOTE(review): property/method names were restored from an automated
    renaming that had collapsed them onto one identifier and duplicated the
    ``generate_dummy_inputs`` parameter names (a SyntaxError); restored
    names follow the standard OnnxConfig contract.
    """

    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self):
        """Input axes, ordered per task family (QA/classification vs. others)."""
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self):
        return 1e-5

    @property
    def default_onnx_opset(self):
        return 12

    def generate_dummy_inputs(
        self,
        processor,
        batch_size=-1,
        seq_length=-1,
        is_pair=False,
        framework=None,
        num_channels=3,
        image_width=40,
        image_height=40,
    ):
        """Build dummy text/bbox/image inputs sized for ONNX export tracing."""
        # OCR must be off so the processor accepts the explicit words/boxes.
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size
        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(
            processor(
                dummy_image, text=dummy_text, boxes=dummy_bboxes, return_tensors=framework,
            )
        )
        return inputs
| 712 |
"""simple docstring"""
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case ( _lowercase , unittest.TestCase):
    # Obfuscated GPT-2 tokenizer test suite (slow + fast tokenizers).
    # NOTE(review): all five class attributes share the single name `snake_case__`,
    # so each assignment clobbers the previous one, and every method is named
    # `SCREAMING_SNAKE_CASE`, so later defs shadow earlier ones. Throughout the
    # class, locals are bound to `_lowerCamelCase` and parameters to
    # `__lowerCAmelCase` while subsequent statements read the meaningful names
    # (`kwargs`, `tokens`, `tokenizer`, ...). This is mechanical renaming damage;
    # restore the original names before relying on any of these tests.
    snake_case__ : Tuple = GPTaTokenizer
    snake_case__ : str = GPTaTokenizerFast
    snake_case__ : Union[str, Any] = True
    snake_case__ : Dict = {"add_prefix_space": True}
    snake_case__ : Any = False

    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        """Write a minimal BPE vocab/merges fixture pair into the temp dir."""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        _lowerCamelCase : List[Any] = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''\u0120''',
            '''\u0120l''',
            '''\u0120n''',
            '''\u0120lo''',
            '''\u0120low''',
            '''er''',
            '''\u0120lowest''',
            '''\u0120newer''',
            '''\u0120wider''',
            '''<unk>''',
            '''<|endoftext|>''',
        ]
        # token -> id map built from the list above (indices are the ids).
        _lowerCamelCase : Union[str, Any] = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
        _lowerCamelCase : List[str] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        _lowerCamelCase : List[Any] = {'''unk_token''': '''<unk>'''}
        _lowerCamelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        _lowerCamelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        # Persist both fixture files so from_pretrained(self.tmpdirname) can load them.
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(__lowerCAmelCase ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(__lowerCAmelCase ) )

    def SCREAMING_SNAKE_CASE ( self : Any , **__lowerCAmelCase : List[str] ):
        """Build a slow GPT-2 tokenizer from the temp fixture files."""
        kwargs.update(self.special_tokens_map )
        return GPTaTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : List[Any] , **__lowerCAmelCase : Tuple ):
        """Build a fast GPT-2 tokenizer from the temp fixture files."""
        kwargs.update(self.special_tokens_map )
        return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : Optional[Any] ):
        """Return an (input_text, output_text) pair for round-trip checks."""
        _lowerCamelCase : Union[str, Any] = '''lower newer'''
        _lowerCamelCase : Any = '''lower newer'''
        return input_text, output_text

    def SCREAMING_SNAKE_CASE ( self : str ):
        """Tokenize with the slow tokenizer and check tokens and ids."""
        _lowerCamelCase : Any = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        _lowerCamelCase : Any = '''lower newer'''
        _lowerCamelCase : Dict = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
        _lowerCamelCase : Optional[int] = tokenizer.tokenize(__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
        self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
        # Append the unk token and check id conversion against hard-coded ids.
        _lowerCamelCase : Dict = tokens + [tokenizer.unk_token]
        _lowerCamelCase : List[str] = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , __lowerCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """Check the fast (rust) tokenizer agrees with the slow one."""
        if not self.test_rust_tokenizer:
            return
        _lowerCamelCase : Any = self.get_tokenizer()
        _lowerCamelCase : Tuple = self.get_rust_tokenizer(add_prefix_space=__lowerCAmelCase )
        _lowerCamelCase : int = '''lower newer'''
        # Testing tokenization
        _lowerCamelCase : Optional[int] = tokenizer.tokenize(__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
        _lowerCamelCase : Optional[int] = rust_tokenizer.tokenize(__lowerCAmelCase )
        self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
        # Testing conversion to ids without special tokens
        _lowerCamelCase : List[Any] = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
        _lowerCamelCase : Union[str, Any] = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
        self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
        # Testing conversion to ids with special tokens
        _lowerCamelCase : str = self.get_rust_tokenizer(add_prefix_space=__lowerCAmelCase )
        _lowerCamelCase : Optional[int] = tokenizer.encode(__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
        _lowerCamelCase : Dict = rust_tokenizer.encode(__lowerCAmelCase )
        self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
        # Testing the unknown token
        _lowerCamelCase : int = tokens + [rust_tokenizer.unk_token]
        _lowerCamelCase : List[Any] = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , __lowerCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : Tuple , *__lowerCAmelCase : Optional[int] , **__lowerCAmelCase : str ):
        """Intentionally skipped (pretokenized inputs are not supported here)."""
        pass

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : List[Any]=1_5 ):
        """Padding without a pad token must raise for every encode entry point."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                _lowerCamelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
                # Simple input
                _lowerCamelCase : Tuple = '''This is a simple input'''
                _lowerCamelCase : List[Any] = ['''This is a simple input 1''', '''This is a simple input 2''']
                _lowerCamelCase : Union[str, Any] = ('''This is a simple input''', '''This is a pair''')
                _lowerCamelCase : Tuple = [
                    ('''This is a simple input 1''', '''This is a simple input 2'''),
                    ('''This is a simple pair 1''', '''This is a simple pair 2'''),
                ]
                # Simple input tests
                self.assertRaises(__lowerCAmelCase , tokenizer_r.encode , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='''max_length''' )
                # Simple input
                self.assertRaises(__lowerCAmelCase , tokenizer_r.encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='''max_length''' )
                # Simple input
                self.assertRaises(
                    __lowerCAmelCase , tokenizer_r.batch_encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='''max_length''' , )
                # Pair input
                self.assertRaises(__lowerCAmelCase , tokenizer_r.encode , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='''max_length''' )
                # Pair input
                self.assertRaises(__lowerCAmelCase , tokenizer_r.encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='''max_length''' )
                # Pair input
                self.assertRaises(
                    __lowerCAmelCase , tokenizer_r.batch_encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='''max_length''' , )

    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        """max_length and automatic padding behaviour once a pad token is set."""
        _lowerCamelCase : Any = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' )
        # Simple input
        _lowerCamelCase : List[str] = '''This is a simple input'''
        _lowerCamelCase : int = ['''This is a simple input looooooooong''', '''This is a simple input''']
        _lowerCamelCase : int = ('''This is a simple input''', '''This is a pair''')
        _lowerCamelCase : int = [
            ('''This is a simple input loooooong''', '''This is a simple input'''),
            ('''This is a simple pair loooooong''', '''This is a simple pair'''),
        ]
        _lowerCamelCase : Tuple = tokenizer.pad_token_id
        _lowerCamelCase : Dict = tokenizer(__lowerCAmelCase , padding='''max_length''' , max_length=3_0 , return_tensors='''np''' )
        _lowerCamelCase : Any = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , truncate=__lowerCAmelCase , return_tensors='''np''' )
        _lowerCamelCase : List[Any] = tokenizer(*__lowerCAmelCase , padding='''max_length''' , max_length=6_0 , return_tensors='''np''' )
        _lowerCamelCase : Optional[Any] = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , truncate=__lowerCAmelCase , return_tensors='''np''' )
        # s
        # test single string max_length padding
        self.assertEqual(out_s['''input_ids'''].shape[-1] , 3_0 )
        self.assertTrue(pad_token_id in out_s['''input_ids'''] )
        self.assertTrue(0 in out_s['''attention_mask'''] )
        # s2
        # test automatic padding
        self.assertEqual(out_sa['''input_ids'''].shape[-1] , 3_3 )
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
        self.assertFalse(0 in out_sa['''attention_mask'''][0] )
        # short slice does have padding
        self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
        self.assertTrue(0 in out_sa['''attention_mask'''][1] )
        # p
        # test single pair max_length padding
        self.assertEqual(out_p['''input_ids'''].shape[-1] , 6_0 )
        self.assertTrue(pad_token_id in out_p['''input_ids'''] )
        self.assertTrue(0 in out_p['''attention_mask'''] )
        # p2
        # test automatic padding pair
        self.assertEqual(out_pa['''input_ids'''].shape[-1] , 5_2 )
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
        self.assertFalse(0 in out_pa['''attention_mask'''][0] )
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
        self.assertTrue(0 in out_pa['''attention_mask'''][1] )

    def SCREAMING_SNAKE_CASE ( self : str ):
        """A custom bos token must be prepended to every encoding and decoding."""
        _lowerCamelCase : Union[str, Any] = '''$$$'''
        _lowerCamelCase : Dict = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=__lowerCAmelCase , add_bos_token=__lowerCAmelCase )
        _lowerCamelCase : Any = '''This is a simple input'''
        _lowerCamelCase : Tuple = ['''This is a simple input 1''', '''This is a simple input 2''']
        _lowerCamelCase : Dict = tokenizer.bos_token_id
        _lowerCamelCase : Tuple = tokenizer(__lowerCAmelCase )
        _lowerCamelCase : Dict = tokenizer(__lowerCAmelCase )
        self.assertEqual(out_s.input_ids[0] , __lowerCAmelCase )
        self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
        # Decoding must surface the bos token as the first whitespace-separated item.
        _lowerCamelCase : Any = tokenizer.decode(out_s.input_ids )
        _lowerCamelCase : Any = tokenizer.batch_decode(out_sa.input_ids )
        self.assertEqual(decode_s.split()[0] , __lowerCAmelCase )
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )

    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """Intentionally skipped."""
        pass

    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        """special_tokens_mask must flag exactly the inserted special tokens."""
        _lowerCamelCase : List[str] = [self.get_tokenizer(do_lower_case=__lowerCAmelCase , add_bos_token=__lowerCAmelCase )]
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                _lowerCamelCase : str = '''Encode this.'''
                _lowerCamelCase : Optional[Any] = '''This one too please.'''
                _lowerCamelCase : List[str] = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
                encoded_sequence += tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
                _lowerCamelCase : List[Any] = tokenizer.encode_plus(
                    __lowerCAmelCase , __lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_special_tokens_mask=__lowerCAmelCase , )
                _lowerCamelCase : str = encoded_sequence_dict['''input_ids''']
                _lowerCamelCase : List[Any] = encoded_sequence_dict['''special_tokens_mask''']
                self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) )
                # Strip the masked (special) positions and compare with the raw encoding.
                _lowerCamelCase : Any = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(__lowerCAmelCase )
                ]
                _lowerCamelCase : List[Any] = [x for x in filtered_sequence if x is not None]
                self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
@require_tokenizers
class __snake_case ( unittest.TestCase):
    # Integration tests against the facebook/opt-350m tokenizer on the HF hub.
    # NOTE(review): these tests require network access, and the locals are all
    # bound to `_lowerCamelCase` while later statements read `tokenizer` --
    # mechanical renaming damage, as in the class above.

    def SCREAMING_SNAKE_CASE ( self : str ):
        """Fast tokenizer converted from slow must survive a save/load round-trip."""
        _lowerCamelCase : Dict = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=__lowerCAmelCase )
        _lowerCamelCase : Tuple = '''A photo of a cat'''
        _lowerCamelCase : Tuple = tokenizer.encode(
            __lowerCAmelCase , )
        self.assertEqual(__lowerCAmelCase , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
        tokenizer.save_pretrained('''test_opt''' )
        _lowerCamelCase : Tuple = AutoTokenizer.from_pretrained('''./test_opt''' )
        _lowerCamelCase : Optional[int] = tokenizer.encode(
            __lowerCAmelCase , )
        self.assertEqual(__lowerCAmelCase , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )

    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        """Slow tokenizer must produce the same ids as the fast one."""
        _lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , use_slow=__lowerCAmelCase )
        _lowerCamelCase : Tuple = '''A photo of a cat'''
        _lowerCamelCase : List[str] = tokenizer.encode(
            __lowerCAmelCase , )
        # Same as above
        self.assertEqual(__lowerCAmelCase , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )

    @unittest.skip('''This test is failing because of a bug in the fast tokenizer''' )
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        """A remapped bos token must survive a fast-tokenizer save/load round-trip."""
        _lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=__lowerCAmelCase )
        _lowerCamelCase : Optional[int] = '''bos'''
        _lowerCamelCase : Optional[Any] = tokenizer.get_vocab()['''bos''']
        _lowerCamelCase : Any = '''A photo of a cat'''
        _lowerCamelCase : int = tokenizer.encode(
            __lowerCAmelCase , )
        # We changed the bos token
        self.assertEqual(__lowerCAmelCase , [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
        tokenizer.save_pretrained('''./tok''' )
        _lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained('''./tok''' )
        self.assertTrue(tokenizer.is_fast )
        _lowerCamelCase : Tuple = tokenizer.encode(
            __lowerCAmelCase , )
        self.assertEqual(__lowerCAmelCase , [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
| 598 | 0 |
from __future__ import annotations
class __snake_case :
    """Boyer-Moore substring search using the bad-character heuristic.

    This is the naive variant: the bad-character shift is computed but the
    outer scan still advances one position at a time.

    Bugfixes vs. the obfuscated original: ``__init__`` declared two parameters
    with the same name (a SyntaxError) and never set the attributes the other
    methods read; the three methods shared one name while the bodies call
    ``mismatch_in_text`` / ``match_in_pattern`` / ``bad_character_heuristic``;
    and the locals ``positions`` / ``mismatch_index`` / ``match_index`` were
    never bound. The names used by the call sites are restored here.
    """

    def __init__(self, text: str, pattern: str) -> None:
        """Store the haystack, the needle and their lengths."""
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the rightmost index of *char* in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the text index of the rightmost mismatching character when the
        pattern is aligned at *current_pos*, or -1 if the window fully matches."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        """Return every position at which the pattern occurs in the text."""
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                _shift = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
if __name__ == "__main__":
    # Demo: report every index at which `pattern` occurs in `text`.
    # Bugfix: the obfuscated original bound every value to `_lowerCamelCase`
    # and then read the undefined names `text`, `pattern`, `bms` and
    # `positions`, and called `BoyerMooreSearch`, which does not exist in this
    # file (the class is named `__snake_case`).
    text = "ABAABA"
    pattern = "AB"
    bms = __snake_case(text, pattern)
    positions = bms.bad_character_heuristic()
    if len(positions) == 0:
        print("No match found")
    else:
        print("Pattern found in following positions: ")
        print(positions)
| 429 |
import math
from datetime import datetime, timedelta
def _UpperCAmelCase (UpperCamelCase_ : int ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = year % 19
_lowerCAmelCase : Tuple = year % 4
_lowerCAmelCase : Dict = year % 7
_lowerCAmelCase : Optional[Any] = math.floor(year / 100 )
_lowerCAmelCase : List[str] = math.floor((13 + 8 * leap_day_inhibits) / 25 )
_lowerCAmelCase : int = leap_day_inhibits / 4
_lowerCAmelCase : List[Any] = (
15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 30
_lowerCAmelCase : str = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
_lowerCAmelCase : Any = (19 * metonic_cycle + secular_moon_shift) % 30
# PHM -> Paschal Full Moon
_lowerCAmelCase : List[Any] = (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
if days_to_add == 29 and days_from_phm_to_sunday == 6:
return datetime(UpperCamelCase_ , 4 , 19 )
elif days_to_add == 28 and days_from_phm_to_sunday == 6:
return datetime(UpperCamelCase_ , 4 , 18 )
else:
return datetime(UpperCamelCase_ , 3 , 22 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
    # Print the computed Easter date for a handful of sample years.
    # Bugfix: the obfuscated original bound the phrase to `_lowerCamelCase`
    # but read the undefined name `tense`, and called `gauss_easter`, which
    # does not exist in this file (the function is named `_UpperCAmelCase`).
    for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3):
        tense = "will be" if year > datetime.now().year else "was"
        print(F'''Easter in {year} {tense} {_UpperCAmelCase(year)}''')
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class lowerCamelCase ( unittest.TestCase ):
    """Helper that builds image-processor kwargs and expected output sizes for
    the ConditionalDetr image-processing tests below.

    NOTE(review): every ``__init__`` parameter is named ``lowerCAmelCase`` (a
    SyntaxError in real Python) and every local is bound to ``UpperCAmelCase_``
    while the statements read the meaningful names (``size``, ``parent``,
    ``batch_size``, ...), so the attributes the other methods read are never
    actually set -- mechanical renaming damage; restore the original
    parameter/attribute names before relying on this class.
    """

    def __init__( self , lowerCAmelCase , lowerCAmelCase=7 , lowerCAmelCase=3 , lowerCAmelCase=30 , lowerCAmelCase=400 , lowerCAmelCase=True , lowerCAmelCase=None , lowerCAmelCase=True , lowerCAmelCase=[0.5, 0.5, 0.5] , lowerCAmelCase=[0.5, 0.5, 0.5] , lowerCAmelCase=True , lowerCAmelCase=1 / 255 , lowerCAmelCase=True , ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        UpperCAmelCase_ = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        UpperCAmelCase_ = parent
        UpperCAmelCase_ = batch_size
        UpperCAmelCase_ = num_channels
        UpperCAmelCase_ = min_resolution
        UpperCAmelCase_ = max_resolution
        UpperCAmelCase_ = do_resize
        UpperCAmelCase_ = size
        UpperCAmelCase_ = do_normalize
        UpperCAmelCase_ = image_mean
        UpperCAmelCase_ = image_std
        UpperCAmelCase_ = do_rescale
        UpperCAmelCase_ = rescale_factor
        UpperCAmelCase_ = do_pad

    def A__ ( self ):
        """Return the kwargs dict used to construct the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def A__ ( self , lowerCAmelCase , lowerCAmelCase=False ):
        """Compute the (height, width) the processor is expected to resize to.

        Mirrors shortest-edge resizing: the short side is scaled to
        ``size["shortest_edge"]`` and the other side keeps the aspect ratio;
        with ``batched=True`` the per-image maxima are returned.
        """
        if not batched:
            UpperCAmelCase_ = image_inputs[0]
            if isinstance(lowerCAmelCase , Image.Image ):
                UpperCAmelCase_ , UpperCAmelCase_ = image.size
            else:
                # numpy / torch layout: (channels, height, width)
                UpperCAmelCase_ , UpperCAmelCase_ = image.shape[1], image.shape[2]
            if w < h:
                UpperCAmelCase_ = int(self.size["shortest_edge"] * h / w )
                UpperCAmelCase_ = self.size["shortest_edge"]
            elif w > h:
                UpperCAmelCase_ = self.size["shortest_edge"]
                UpperCAmelCase_ = int(self.size["shortest_edge"] * w / h )
            else:
                UpperCAmelCase_ = self.size["shortest_edge"]
                UpperCAmelCase_ = self.size["shortest_edge"]
        else:
            UpperCAmelCase_ = []
            for image in image_inputs:
                UpperCAmelCase_ , UpperCAmelCase_ = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            # A padded batch takes the max height and max width over the batch.
            UpperCAmelCase_ = max(lowerCAmelCase , key=lambda lowerCAmelCase : item[0] )[0]
            UpperCAmelCase_ = max(lowerCAmelCase , key=lambda lowerCAmelCase : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class lowerCamelCase ( lowercase__, unittest.TestCase ):
    """ConditionalDetr image-processor tests: properties, resizing/padding for
    PIL / numpy / torch inputs, and slow integration checks against COCO
    fixture annotations.

    NOTE(review): as elsewhere in this file, locals are bound to
    ``UpperCAmelCase_`` while later statements read the meaningful names
    (``image_processing``, ``encoding``, ...) -- mechanical renaming damage.
    """
    lowerCAmelCase_ : Optional[Any] = ConditionalDetrImageProcessor if is_vision_available() else None

    def A__ ( self ):
        """Attach the kwargs/size helper defined above."""
        UpperCAmelCase_ = ConditionalDetrImageProcessingTester(self )

    @property
    def A__ ( self ):
        return self.image_processor_tester.prepare_image_processor_dict()

    def A__ ( self ):
        """The processor must expose the expected configuration attributes."""
        UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(lowerCAmelCase , "image_mean" ) )
        self.assertTrue(hasattr(lowerCAmelCase , "image_std" ) )
        self.assertTrue(hasattr(lowerCAmelCase , "do_normalize" ) )
        self.assertTrue(hasattr(lowerCAmelCase , "do_resize" ) )
        self.assertTrue(hasattr(lowerCAmelCase , "size" ) )

    def A__ ( self ):
        """from_dict must honour defaults and explicit size/max_size overrides."""
        UpperCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} )
        self.assertEqual(image_processor.do_pad , lowerCAmelCase )
        UpperCAmelCase_ = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowerCAmelCase )
        self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
        self.assertEqual(image_processor.do_pad , lowerCAmelCase )

    def A__ ( self ):
        """Intentionally skipped."""
        pass

    def A__ ( self ):
        """PIL inputs: single image and batch resize to the expected shape."""
        # Initialize image_processing
        UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase , Image.Image )
        # Test not batched input
        UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(lowerCAmelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(lowerCAmelCase , batched=lowerCAmelCase )
        UpperCAmelCase_ = image_processing(lowerCAmelCase , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def A__ ( self ):
        """numpy inputs: single image and batch resize to the expected shape."""
        # Initialize image_processing
        UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , numpify=lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase , np.ndarray )
        # Test not batched input
        UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(lowerCAmelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        UpperCAmelCase_ = image_processing(lowerCAmelCase , return_tensors="pt" ).pixel_values
        UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(lowerCAmelCase , batched=lowerCAmelCase )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def A__ ( self ):
        """torch inputs: single image and batch resize to the expected shape."""
        # Initialize image_processing
        UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , torchify=lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase , torch.Tensor )
        # Test not batched input
        UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(lowerCAmelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        UpperCAmelCase_ = image_processing(lowerCAmelCase , return_tensors="pt" ).pixel_values
        UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(lowerCAmelCase , batched=lowerCAmelCase )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    @slow
    def A__ ( self ):
        """Integration: COCO detection annotations produce the reference targets."""
        # prepare image and target
        UpperCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
            UpperCAmelCase_ = json.loads(f.read() )
        UpperCAmelCase_ = {"image_id": 3_9769, "annotations": target}
        # encode them
        UpperCAmelCase_ = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50" )
        UpperCAmelCase_ = image_processing(images=lowerCAmelCase , annotations=lowerCAmelCase , return_tensors="pt" )
        # verify pixel values
        UpperCAmelCase_ = torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding["pixel_values"].shape , lowerCAmelCase )
        UpperCAmelCase_ = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCAmelCase , atol=1e-4 ) )
        # verify area
        UpperCAmelCase_ = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCAmelCase ) )
        # verify boxes
        UpperCAmelCase_ = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCAmelCase )
        UpperCAmelCase_ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCAmelCase , atol=1e-3 ) )
        # verify image_id
        UpperCAmelCase_ = torch.tensor([3_9769] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCAmelCase ) )
        # verify is_crowd
        UpperCAmelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCAmelCase ) )
        # verify class_labels
        UpperCAmelCase_ = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCAmelCase ) )
        # verify orig_size
        UpperCAmelCase_ = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCAmelCase ) )
        # verify size
        UpperCAmelCase_ = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCAmelCase ) )

    @slow
    def A__ ( self ):
        """Integration: COCO panoptic annotations produce the reference targets."""
        # prepare image, target and masks_path
        UpperCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
            UpperCAmelCase_ = json.loads(f.read() )
        UpperCAmelCase_ = {"file_name": "000000039769.png", "image_id": 3_9769, "segments_info": target}
        UpperCAmelCase_ = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
        # encode them
        UpperCAmelCase_ = ConditionalDetrImageProcessor(format="coco_panoptic" )
        UpperCAmelCase_ = image_processing(images=lowerCAmelCase , annotations=lowerCAmelCase , masks_path=lowerCAmelCase , return_tensors="pt" )
        # verify pixel values
        UpperCAmelCase_ = torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding["pixel_values"].shape , lowerCAmelCase )
        UpperCAmelCase_ = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCAmelCase , atol=1e-4 ) )
        # verify area
        UpperCAmelCase_ = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCAmelCase ) )
        # verify boxes
        UpperCAmelCase_ = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCAmelCase )
        UpperCAmelCase_ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCAmelCase , atol=1e-3 ) )
        # verify image_id
        UpperCAmelCase_ = torch.tensor([3_9769] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCAmelCase ) )
        # verify is_crowd
        UpperCAmelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCAmelCase ) )
        # verify class_labels
        UpperCAmelCase_ = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCAmelCase ) )
        # verify masks
        UpperCAmelCase_ = 82_2873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item() , lowerCAmelCase )
        # verify orig_size
        UpperCAmelCase_ = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCAmelCase ) )
        # verify size
        UpperCAmelCase_ = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCAmelCase ) )
| 705 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def snake_case__ ( bpayload , sampling_rate ) -> np.array:
    """Decode raw audio bytes to mono float32 PCM at *sampling_rate* via ffmpeg.

    Bugfixes vs. the obfuscated original: both parameters shared the name
    ``__SCREAMING_SNAKE_CASE`` (a SyntaxError) while the body read the unbound
    names ``sampling_rate``/``bpayload``; locals were bound to ``UpperCAmelCase_``
    and never read; and ``np.floataa`` is not a numpy attribute (restored to
    ``np.float32``, matching the ``f32le`` output format requested below).

    :param bpayload: raw audio file bytes fed to ffmpeg on stdin.
    :param sampling_rate: target sample rate in Hz.
    :return: 1-D float32 numpy array of samples.
    :raises ValueError: if ffmpeg is missing or produced no samples.
    """
    ar = f'''{sampling_rate}'''
    ac = "1"  # force mono
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    try:
        with subprocess.Popen(ffmpeg_command , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload )
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename" ) from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes , np.float32 )
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile" )
    return audio
def ffmpeg_microphone( sampling_rate , chunk_length_s , format_for_conversion = "f32le" , ):
    """Yield fixed-size raw audio chunks captured from the default microphone
    through an ffmpeg subprocess.

    Bugfixes vs. the obfuscated original: all three parameters shared one name
    (a SyntaxError) while the body read the unbound meaningful names, and the
    def itself was renamed to ``snake_case__`` even though the caller below
    invokes ``ffmpeg_microphone`` -- the called name is restored here.

    :param sampling_rate: capture sample rate in Hz.
    :param chunk_length_s: length of each yielded chunk in seconds.
    :param format_for_conversion: ``"s16le"`` (2 bytes/sample) or ``"f32le"``
        (4 bytes/sample).
    :raises ValueError: on an unsupported format.
    """
    ar = f'''{sampling_rate}'''
    ac = "1"  # force mono
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
    # Pick the platform-specific ffmpeg capture backend and device name.
    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"
    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    # Chunk size in bytes = samples per chunk * bytes per sample.
    chunk_len = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command , chunk_len )
    for item in iterator:
        yield item
def snake_case__ ( sampling_rate , chunk_length_s , stream_chunk_s = None , stride_length_s = None , format_for_conversion = "f32le" , ):
    """Stream microphone audio as overlapping numpy chunks suitable for
    streaming inference.

    Bugfixes vs. the obfuscated original: parameters shared one name (a
    SyntaxError) while the body read the unbound meaningful names, locals were
    bound to ``UpperCAmelCase_``, the dict-item writes (``item["raw"]`` etc.)
    were lost to plain local assignments, and ``np.floataa``/``np.intaa`` are
    not numpy attributes (restored to ``np.float32``/``np.int16``).

    :param sampling_rate: capture sample rate in Hz.
    :param chunk_length_s: logical chunk length in seconds.
    :param stream_chunk_s: if set, the microphone read granularity in seconds.
    :param stride_length_s: left/right overlap in seconds (scalar or pair);
        defaults to ``chunk_length_s / 6``.
    :param format_for_conversion: ``"s16le"`` or ``"f32le"``.
    :raises ValueError: on an unsupported format.
    """
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate , chunk_s , format_for_conversion=format_for_conversion )
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
    if isinstance(stride_length_s , (int, float) ):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
    # Wall-clock bookkeeping so we can drop chunks when processing falls behind.
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s )
    for item in chunk_bytes_iter(microphone , chunk_len , stride=(stride_left, stride_right) , stream=True ):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"] , dtype=dtype )
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def snake_case__(iterator, chunk_len, stride, stream=False):
    """
    Re-chunk a byte iterator into fixed-size chunks with left/right strides.

    Yields dicts ``{"raw": bytes, "stride": (left, right)}``; when ``stream`` is
    True a ``"partial"`` flag is added (True while the accumulator has not yet
    reached `chunk_len`). The first chunk has no left stride and the last chunk
    no right stride.

    NOTE(review): the original collapsed every local into one mangled name,
    leaving `stride_left`/`_stride_left`/`acc` undefined; names were
    reconstructed from the surrounding reads.

    Args:
        iterator: iterable of `bytes` pieces.
        chunk_len: size in bytes of each emitted chunk.
        stride: ``(stride_left, stride_right)`` overlap in bytes.
        stream: if True, also emit partial chunks with a ``"partial"`` flag.

    Raises:
        ValueError: if the strides sum to at least `chunk_len`.
    """
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0  # the very first chunk has no left overlap
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                # Keep the overlapping tail for the next chunk.
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
def snake_case__(ffmpeg_command, buflen):
    """
    Run a command (normally ffmpeg) and yield its stdout in `buflen`-byte reads.

    NOTE(review): the original used one mangled name for both parameters (a
    SyntaxError); names restored from their uses (the Popen argv and the read size).

    Args:
        ffmpeg_command: argv list passed to ``subprocess.Popen``.
        buflen: number of bytes requested per ``stdout.read`` call.

    Raises:
        ValueError: if the executable is not found (ffmpeg missing).
    """
    bufsize = 2**24  # 16Mo pipe buffer so ffmpeg is not throttled by small reads
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
| 23 | 0 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _lowerCAmelCase ( _lowerCAmelCase ) -> Tuple:
'''simple docstring'''
if (
(cp >= 0x4_E00 and cp <= 0x9_FFF)
or (cp >= 0x3_400 and cp <= 0x4_DBF) #
or (cp >= 0x20_000 and cp <= 0x2A_6DF) #
or (cp >= 0x2A_700 and cp <= 0x2B_73F) #
or (cp >= 0x2B_740 and cp <= 0x2B_81F) #
or (cp >= 0x2B_820 and cp <= 0x2C_EAF) #
or (cp >= 0xF_900 and cp <= 0xF_AFF)
or (cp >= 0x2F_800 and cp <= 0x2F_A1F) #
): #
return True
return False
def _lowerCAmelCase(word: str) -> int:
    """
    Return 1 if every character of `word` is a CJK character, else 0.

    NOTE(review): relies on the code-point classifier defined just above, which
    in this copy of the file carries a mangled name (originally
    `_is_chinese_char`) — confirm that definition. The original return
    annotation (`-> str`) was wrong; the function returns 0/1.
    """
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def _lowerCAmelCase(tokens) -> list:
    """
    Collect the distinct multi-character tokens that are entirely Chinese.

    NOTE(review): calls `is_chinese`, which is defined above in this file under
    a mangled name — confirm that definition.
    """
    word_set = set()
    for token in tokens:
        # A "chinese word" is a token longer than one char whose chars are all CJK.
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def _lowerCAmelCase(bert_tokens, chinese_word_set):
    """
    Prefix BERT sub-tokens that continue a known Chinese word with "##".

    Greedy longest-match: at each CJK position, try the longest window (down to
    2 chars) that matches a word in `chinese_word_set`; all characters after the
    first in a match get the "##" continuation prefix. `bert_tokens` is modified
    in place and returned.

    NOTE(review): locals reconstructed from their uses; `is_chinese` is defined
    above under a mangled name — confirm that definition.
    """
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            # Try the longest candidate window first, down to 2 characters.
            window = min(end - start, max_word_len)
            for i in range(window, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def _lowerCAmelCase(lines, ltp_tokenizer, bert_tokenizer) -> list:
    """
    For each line, compute the positions of BERT sub-tokens that continue a
    Chinese whole word (used for whole-word masking).

    NOTE(review): locals and keyword arguments were mangled (`add_special_tokens`
    and `truncation` pointed at an undefined name; the standard values are
    True). `get_chinese_word`, `add_sub_symbol` and `_is_chinese_char` are
    defined above under mangled names — confirm those definitions.

    Args:
        lines: list of raw text lines.
        ltp_tokenizer: LTP segmenter (processed in batches of 100 lines).
        bert_tokenizer: a `BertTokenizer`.

    Returns:
        One list of sub-token indices per input line.
    """
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def _lowerCAmelCase(args) -> None:
    """
    Read the corpus, compute whole-word-masking reference ids and dump them as
    JSON lines to `args.save_path`.

    NOTE(review): the parameter name is restored from the `args.*` attribute
    reads in the body; `prepare_ref` is defined above under a mangled name —
    confirm that definition.
    """
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    # avoid delimiter like '\u2029'
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    # CLI entry point: segment a Chinese corpus with LTP + BERT and write
    # whole-word-masking reference ids.
    # NOTE(review): the parser and parsed args were both assigned to a mangled
    # name `A` while the code below read `parser`/`args`; bindings restored.
    parser = argparse.ArgumentParser(description='prepare_chinese_ref')
    parser.add_argument(
        '--file_name',
        type=str,
        default='./resources/chinese-demo.txt',
        help='file need process, same as training data in lm',
    )
    parser.add_argument(
        '--ltp', type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path'
    )
    parser.add_argument('--bert', type=str, default='./resources/robert', help='resources for Bert tokenizer')
    parser.add_argument('--save_path', type=str, default='./resources/ref.txt', help='path to save res')
    args = parser.parse_args()
    # NOTE(review): `main` is defined above in this file under a mangled name
    # (`_lowerCAmelCase`); confirm/restore that definition's name as well.
    main(args)
| 371 |
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class UpperCamelCase( unittest.TestCase ):
    # Test suite for the fill-mask pipeline (PT/TF, tiny and large models).
    # NOTE(review): identifiers in this file look machine-mangled — the class
    # name, the two mapping attributes below (conventionally `model_mapping` /
    # `tf_model_mapping`), and every method name (unittest only collects methods
    # whose names start with `test_`; none of these do). Method bodies assign
    # results to `__snake_case` but read them back through their original names
    # (`unmasker`, `outputs`, `pipe`, ...), and `SCREAMING_SNAKE_CASE` is used
    # both as a parameter name and as a free variable. Code is left byte-identical
    # here; only comments/docstrings were added. TODO: restore the original
    # identifiers from the upstream test file.
    snake_case_ : int = MODEL_FOR_MASKED_LM_MAPPING
    snake_case_ : Optional[int] = TF_MODEL_FOR_MASKED_LM_MAPPING
    # Teardown hook: release as much GPU memory as possible between tests.
    def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Union[str, Any]:
        '''Free Python garbage and empty the CUDA cache after each test.'''
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch
            torch.cuda.empty_cache()
    # Tiny-model smoke test, TensorFlow backend: pins exact (random-init) scores.
    @require_tf
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Union[str, Any]:
        '''TF fill-mask on a tiny model: plain input, second input, and explicit targets.'''
        __snake_case = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf" )
        __snake_case = unmasker("My name is <mask>" )
        self.assertEqual(
            nested_simplify(SCREAMING_SNAKE_CASE , decimals=6 ) , [
                {"sequence": "My name is grouped", "score": 2.1e-0_5, "token": 3_8_0_1_5, "token_str": " grouped"},
                {"sequence": "My name is accuser", "score": 2.1e-0_5, "token": 2_5_5_0_6, "token_str": " accuser"},
            ] , )
        __snake_case = unmasker("The largest city in France is <mask>" )
        self.assertEqual(
            nested_simplify(SCREAMING_SNAKE_CASE , decimals=6 ) , [
                {
                    "sequence": "The largest city in France is grouped",
                    "score": 2.1e-0_5,
                    "token": 3_8_0_1_5,
                    "token_str": " grouped",
                },
                {
                    "sequence": "The largest city in France is accuser",
                    "score": 2.1e-0_5,
                    "token": 2_5_5_0_6,
                    "token_str": " accuser",
                },
            ] , )
        __snake_case = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
        self.assertEqual(
            nested_simplify(SCREAMING_SNAKE_CASE , decimals=6 ) , [
                {"sequence": "My name is Clara", "score": 2e-0_5, "token": 1_3_6_0_6, "token_str": " Clara"},
                {"sequence": "My name is Patrick", "score": 2e-0_5, "token": 3_4_9_9, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 1.9e-0_5, "token": 2_9_4_1, "token_str": " Te"},
            ] , )
    # Tiny-model smoke test, PyTorch backend: also covers a two-mask input.
    @require_torch
    def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> List[str]:
        '''PT fill-mask on a tiny model: plain input, targets, and double-mask output shape.'''
        __snake_case = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt" )
        __snake_case = unmasker("My name is <mask>" )
        self.assertEqual(
            nested_simplify(SCREAMING_SNAKE_CASE , decimals=6 ) , [
                {"sequence": "My name is Maul", "score": 2.2e-0_5, "token": 3_5_6_7_6, "token_str": " Maul"},
                {"sequence": "My name isELS", "score": 2.2e-0_5, "token": 1_6_4_1_6, "token_str": "ELS"},
            ] , )
        __snake_case = unmasker("The largest city in France is <mask>" )
        self.assertEqual(
            nested_simplify(SCREAMING_SNAKE_CASE , decimals=6 ) , [
                {
                    "sequence": "The largest city in France is Maul",
                    "score": 2.2e-0_5,
                    "token": 3_5_6_7_6,
                    "token_str": " Maul",
                },
                {"sequence": "The largest city in France isELS", "score": 2.2e-0_5, "token": 1_6_4_1_6, "token_str": "ELS"},
            ] , )
        __snake_case = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
        self.assertEqual(
            nested_simplify(SCREAMING_SNAKE_CASE , decimals=6 ) , [
                {"sequence": "My name is Patrick", "score": 2.1e-0_5, "token": 3_4_9_9, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 2e-0_5, "token": 2_9_4_1, "token_str": " Te"},
                {"sequence": "My name is Clara", "score": 2e-0_5, "token": 1_3_6_0_6, "token_str": " Clara"},
            ] , )
        __snake_case = unmasker("My name is <mask> <mask>" , top_k=2 )
        self.assertEqual(
            nested_simplify(SCREAMING_SNAKE_CASE , decimals=6 ) , [
                [
                    {
                        "score": 2.2e-0_5,
                        "token": 3_5_6_7_6,
                        "token_str": " Maul",
                        "sequence": "<s>My name is Maul<mask></s>",
                    },
                    {"score": 2.2e-0_5, "token": 1_6_4_1_6, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
                ],
                [
                    {
                        "score": 2.2e-0_5,
                        "token": 3_5_6_7_6,
                        "token_str": " Maul",
                        "sequence": "<s>My name is<mask> Maul</s>",
                    },
                    {"score": 2.2e-0_5, "token": 1_6_4_1_6, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
                ],
            ] , )
    # fp16-on-GPU regression test: only checks the pipeline runs end to end.
    @require_torch_gpu
    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Union[str, Any]:
        '''A half-precision model on GPU must still postprocess (cast back to fp32).'''
        __snake_case = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt" )
        # convert model to fp16
        pipe.model.half()
        __snake_case = pipe("Paris is the [MASK] of France." )
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
    # Slow test: real distilroberta-base checkpoint, PyTorch backend.
    @slow
    @require_torch
    def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Tuple:
        '''Run the shared large-model assertions with the PT pipeline.'''
        __snake_case = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt" )
        self.run_large_test(SCREAMING_SNAKE_CASE )
    # Slow test: real distilroberta-base checkpoint, TensorFlow backend.
    @slow
    @require_tf
    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> int:
        '''Run the shared large-model assertions with the TF pipeline.'''
        __snake_case = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf" )
        self.run_large_test(SCREAMING_SNAKE_CASE )
    # Shared assertions for the real checkpoint (framework-agnostic).
    def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE : Optional[Any] ) -> str:
        '''Pin top predictions of distilroberta-base for a few prompts and targets.'''
        __snake_case = unmasker("My name is <mask>" )
        self.assertEqual(
            nested_simplify(SCREAMING_SNAKE_CASE ) , [
                {"sequence": "My name is John", "score": 0.008, "token": 6_1_0, "token_str": " John"},
                {"sequence": "My name is Chris", "score": 0.007, "token": 1_5_7_3, "token_str": " Chris"},
            ] , )
        __snake_case = unmasker("The largest city in France is <mask>" )
        self.assertEqual(
            nested_simplify(SCREAMING_SNAKE_CASE ) , [
                {
                    "sequence": "The largest city in France is Paris",
                    "score": 0.251,
                    "token": 2_2_0_1,
                    "token_str": " Paris",
                },
                {
                    "sequence": "The largest city in France is Lyon",
                    "score": 0.214,
                    "token": 1_2_7_9_0,
                    "token_str": " Lyon",
                },
            ] , )
        __snake_case = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
        self.assertEqual(
            nested_simplify(SCREAMING_SNAKE_CASE ) , [
                {"sequence": "My name is Patrick", "score": 0.005, "token": 3_4_9_9, "token_str": " Patrick"},
                {"sequence": "My name is Clara", "score": 0.000, "token": 1_3_6_0_6, "token_str": " Clara"},
                {"sequence": "My name is Te", "score": 0.000, "token": 2_9_4_1, "token_str": " Te"},
            ] , )
    # Generic harness entry, PyTorch backend.
    @require_torch
    def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> str:
        '''Drive run_pipeline_test with the tiny PT model.'''
        __snake_case = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt" )
        __snake_case = None
        __snake_case = None
        self.run_pipeline_test(SCREAMING_SNAKE_CASE , [] )
    # Generic harness entry, TensorFlow backend.
    @require_tf
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Optional[int]:
        '''Drive run_pipeline_test with the tiny TF model.'''
        __snake_case = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf" )
        __snake_case = None
        __snake_case = None
        self.run_pipeline_test(SCREAMING_SNAKE_CASE , [] )
    # Harness hook: build a FillMaskPipeline + example inputs for a model/tokenizer pair.
    def SCREAMING_SNAKE_CASE_ ( self : Tuple , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple ) -> int:
        '''Skip tokenizers without a mask token; otherwise return (pipeline, examples).'''
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)" )
        __snake_case = FillMaskPipeline(model=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE )
        __snake_case = [
            f'''This is another {tokenizer.mask_token} test''',
        ]
        return fill_masker, examples
    # Main harness body: shape checks plus the error/option sub-suites below.
    def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ) -> Optional[Any]:
        '''Check output schema for single/list/multi inputs, invalid inputs, then run sub-tests.'''
        __snake_case = fill_masker.tokenizer
        __snake_case = fill_masker.model
        __snake_case = fill_masker(
            f'''This is a {tokenizer.mask_token}''' , )
        self.assertEqual(
            SCREAMING_SNAKE_CASE , [
                {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )},
                {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )},
                {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )},
                {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )},
                {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )},
            ] , )
        __snake_case = fill_masker([f'''This is a {tokenizer.mask_token}'''] )
        self.assertEqual(
            SCREAMING_SNAKE_CASE , [
                {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )},
                {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )},
                {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )},
                {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )},
                {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )},
            ] , )
        __snake_case = fill_masker([f'''This is a {tokenizer.mask_token}''', f'''Another {tokenizer.mask_token} great test.'''] )
        self.assertEqual(
            SCREAMING_SNAKE_CASE , [
                [
                    {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )},
                    {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )},
                    {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )},
                    {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )},
                    {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )},
                ],
                [
                    {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )},
                    {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )},
                    {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )},
                    {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )},
                    {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )},
                ],
            ] , )
        with self.assertRaises(SCREAMING_SNAKE_CASE ):
            fill_masker([None] )
        # No mask_token is not supported
        with self.assertRaises(SCREAMING_SNAKE_CASE ):
            fill_masker("This is" )
        self.run_test_top_k(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        self.run_test_targets(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        self.run_test_top_k_targets(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        self.fill_mask_with_duplicate_targets_and_top_k(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        self.fill_mask_with_multiple_masks(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
    # Sub-suite: the `targets` option (pipeline arg, call arg, score equivalence, invalid values).
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Any:
        '''Restricting predictions to `targets` must yield exactly those tokens.'''
        __snake_case = tokenizer.get_vocab()
        __snake_case = sorted(vocab.keys() )[:2]
        # Pipeline argument
        __snake_case = FillMaskPipeline(model=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , targets=SCREAMING_SNAKE_CASE )
        __snake_case = fill_masker(f'''This is a {tokenizer.mask_token}''' )
        self.assertEqual(
            SCREAMING_SNAKE_CASE , [
                {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )},
                {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )},
            ] , )
        __snake_case = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs} , SCREAMING_SNAKE_CASE )
        __snake_case = [tokenizer.decode([x] ) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs} , set(SCREAMING_SNAKE_CASE ) )
        # Call argument
        __snake_case = FillMaskPipeline(model=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE )
        __snake_case = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=SCREAMING_SNAKE_CASE )
        self.assertEqual(
            SCREAMING_SNAKE_CASE , [
                {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )},
                {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )},
            ] , )
        __snake_case = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs} , SCREAMING_SNAKE_CASE )
        __snake_case = [tokenizer.decode([x] ) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs} , set(SCREAMING_SNAKE_CASE ) )
        # Score equivalence
        __snake_case = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=SCREAMING_SNAKE_CASE )
        __snake_case = [top_mask["token_str"] for top_mask in outputs]
        __snake_case = [top_mask["score"] for top_mask in outputs]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(SCREAMING_SNAKE_CASE ) == set(SCREAMING_SNAKE_CASE ):
            __snake_case = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=SCREAMING_SNAKE_CASE )
            __snake_case = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE ) , nested_simplify(SCREAMING_SNAKE_CASE ) )
        # Raises with invalid
        with self.assertRaises(SCREAMING_SNAKE_CASE ):
            __snake_case = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=[] )
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(SCREAMING_SNAKE_CASE ):
                __snake_case = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=[""] )
            with self.assertRaises(SCREAMING_SNAKE_CASE ):
                __snake_case = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets="" )
    # Sub-suite: `top_k` as a pipeline argument vs a call argument must agree.
    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : List[str] ) -> Optional[Any]:
        '''top_k=2 at construction time and at call time produce identical outputs.'''
        __snake_case = FillMaskPipeline(model=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , top_k=2 )
        __snake_case = fill_masker(f'''This is a {tokenizer.mask_token}''' )
        self.assertEqual(
            SCREAMING_SNAKE_CASE , [
                {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )},
                {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )},
            ] , )
        __snake_case = FillMaskPipeline(model=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE )
        __snake_case = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=2 )
        self.assertEqual(
            SCREAMING_SNAKE_CASE , [
                {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )},
                {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )},
            ] , )
        self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE ) , nested_simplify(SCREAMING_SNAKE_CASE ) )
    # Sub-suite: combining `top_k` with `targets` filters consistently.
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : List[Any] ) -> Dict:
        '''top_k=2 over 3 targets equals the top-2 of top_k=3 over the same targets.'''
        __snake_case = tokenizer.get_vocab()
        __snake_case = FillMaskPipeline(model=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE )
        # top_k=2, ntargets=3
        __snake_case = sorted(vocab.keys() )[:3]
        __snake_case = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=2 , targets=SCREAMING_SNAKE_CASE )
        # If we use the most probably targets, and filter differently, we should still
        # have the same results
        __snake_case = [el["token_str"] for el in sorted(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : x["score"] , reverse=SCREAMING_SNAKE_CASE )]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(SCREAMING_SNAKE_CASE ).issubset(SCREAMING_SNAKE_CASE ):
            __snake_case = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=3 , targets=SCREAMING_SNAKE_CASE )
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE ) , nested_simplify(SCREAMING_SNAKE_CASE ) )
    # Sub-suite: duplicate targets must be deduplicated before top_k is applied.
    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Dict ) -> Optional[Any]:
        '''A target list with duplicates can never yield more than its unique count.'''
        __snake_case = FillMaskPipeline(model=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE )
        __snake_case = tokenizer.get_vocab()
        # String duplicates + id duplicates
        __snake_case = sorted(vocab.keys() )[:3]
        __snake_case = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        __snake_case = fill_masker(f'''My name is {tokenizer.mask_token}''' , targets=SCREAMING_SNAKE_CASE , top_k=1_0 )
        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(SCREAMING_SNAKE_CASE ) , 3 )
    # Sub-suite: an input with several mask tokens yields one result list per mask.
    def SCREAMING_SNAKE_CASE_ ( self : str , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : str ) -> Optional[Any]:
        '''Three masks + top_k=2 produce a 3x2 nested output structure.'''
        __snake_case = FillMaskPipeline(model=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE )
        __snake_case = fill_masker(
            f'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''' , top_k=2 )
        self.assertEqual(
            SCREAMING_SNAKE_CASE , [
                [
                    {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )},
                    {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )},
                ],
                [
                    {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )},
                    {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )},
                ],
                [
                    {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )},
                    {"sequence": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE ), "token": ANY(SCREAMING_SNAKE_CASE ), "token_str": ANY(SCREAMING_SNAKE_CASE )},
                ],
            ] , )
| 371 | 1 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class __lowercase(ProcessorMixin):
    """
    Processor bundling a Wav2Vec2 feature extractor and a CTC tokenizer into a
    single object: audio inputs go through the feature extractor, text through
    the tokenizer.

    NOTE(review): this copy of the file was identifier-mangled — the base class
    was an undefined `__snake_case` (the file imports `ProcessorMixin`), both
    class attributes shared one name, every method except `__init__`/`__call__`
    shared the name `_lowercase` (shadowing each other), and several signatures
    repeated one parameter name (a SyntaxError). Names below are restored from
    the `ProcessorMixin` contract and from how each value is used.
    """

    # ProcessorMixin uses these class names to auto-load the two sub-components.
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        # The deprecated `as_target_processor` path routes calls through this attribute.
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load both sub-components; fall back to explicit classes for legacy configs."""
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ",
                FutureWarning,
            )
            feature_extractor = WavaVecaFeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = WavaVecaCTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)
            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        """Forward `audio` to the feature extractor and/or `text` to the tokenizer."""
        # Deprecated target-context path: delegate everything to the active processor.
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            # First positional argument is the audio input.
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            # Both given: attach the tokenized text as labels on the audio features.
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def pad(self, *args, **kwargs):
        """Pad `input_features` with the feature extractor and/or `labels` with the tokenizer."""
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def batch_decode(self, *args, **kwargs):
        """Delegate to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Delegate to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Deprecated: temporarily route `__call__`/`pad` to the tokenizer for labels."""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        # Restore the default routing even though no try/finally guards exceptions
        # (kept identical to the original control flow).
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 627 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
__a = logging.get_logger(__name__)

# Map of canonical checkpoint names to their hosted config files.
# NOTE(review): this dict previously rebound the same mangled name as the
# logger above, clobbering it; it is given its own (conventional) name here —
# confirm no other module referenced the dict through the old name.
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class __lowercase(__snake_case):
    """
    Configuration for an NLLB-MoE model: encoder/decoder stack sizes plus the
    mixture-of-experts routing hyper-parameters.

    Fixes applied: the original `__init__` declared every parameter with the same
    name `__lowerCamelCase` (a SyntaxError), its body referenced names that were
    never bound, and every `self.<attr> = ...` write had been collapsed to a
    throwaway local. Parameter names were reconstructed from the body's reads and
    the defaults' order; the three class attributes (all previously named
    `UpperCamelCase`, so only the last survived) were restored to the standard
    PretrainedConfig metadata names.
    """

    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=128112,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        # Mirrors encoder_layers under the name common code expects.
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(F"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 627 | 1 |
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Count the set bits of *number* by repeatedly clearing the lowest set bit.

    Fix: the parameter was named ``A__`` while the body read ``number``
    (NameError), and the function name ``__a`` was clobbered by the next
    definition; the benchmark below calls this function by its real name.

    Raises:
        ValueError: if *number* is negative.
    """
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        # n & (n - 1) clears the least-significant set bit.
        number &= number - 1
        result += 1
    return result
def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Count the set bits of *number* by testing parity and shifting right.

    Fix: the parameter was named ``A__`` while the body read ``number``
    (NameError), and the function name ``__a`` duplicated its sibling's; the
    benchmark below calls this function by its real name.

    Raises:
        ValueError: if *number* is negative.
    """
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
def benchmark() -> None:
    """Time both bit-counting implementations for a few sample inputs.

    Fix: renamed from ``__a`` (the ``__main__`` guard calls ``benchmark()``) and
    replaced the undefined ``lowercase__`` placeholders with the setup string /
    loop variable the body clearly intends.
    """

    def do_benchmark(number: int) -> None:
        setup = """import __main__ as z"""
        print(F"Benchmark when {number = }:")
        print(F"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit("z.get_set_bits_count_using_modulo_operator(25)", setup=setup)
        print(F"timeit() runs in {timing} seconds")
        print(F"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            "z.get_set_bits_count_using_brian_kernighans_algorithm(25)", setup=setup,
        )
        print(F"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
# Script entry point: run the module doctests, then the timing benchmark.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    """Builds tiny GPTNeoXJapanese configs/inputs and runs shape checks for the test class below.

    Fixes applied: the original ``__init__`` declared every parameter as
    ``lowerCamelCase__`` (duplicate argument names are a SyntaxError), every method
    was named ``__a`` (so only the last survived), and all ``self.``/``config.``
    assignments had been collapsed to throwaway locals. Names were reconstructed
    from the body's reads; the test class at the end of this file already refers
    to ``GPTNeoXJapaneseModelTester`` and these method names.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        hidden_dropout=0.0,
        attention_dropout=0.1,
        weight_tying=True,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, input_mask, token_labels) for a tiny model."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = self.get_config()
        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_multiple_size=self.intermediate_multiple_size,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            weight_tying=self.weight_tying,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        """Same as prepare_config_and_inputs but with the config in decoder mode."""
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # Causal LM uses the inputs themselves as labels (standard LM test pattern).
        result = model(input_ids, attention_mask=input_mask, labels=input_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values, output_hidden_states=True,
        )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXJapaneseModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + generation tests for GPTNeoXJapanese.

    Fixes applied: the base classes were the undefined name ``lowercase`` (this
    file imports ``ModelTesterMixin`` and ``PipelineTesterMixin``, which the
    mixin hooks require); every class attribute was named ``_snake_case`` (so
    only the last survived) and every test method ``__a``; the class name also
    shadowed the tester class above. Attribute/method names restored to the
    standard ModelTesterMixin contract.
    """

    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    # Common checks that do not apply to this architecture.
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"
        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
        EXPECTED_OUTPUTS = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]
        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)
        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
'''simple docstring'''
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module-level logger plus the standard tokenizer metadata tables.
# Fix: all four assignments were bound to the single name `_lowercase`, so only
# the last survived, while the tokenizer class below reads `logger`,
# `VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP` and
# `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}
def load_vocab(vocab_file: str):
    """Read a one-token-per-line vocabulary file into an OrderedDict token -> index.

    Fixes: renamed from ``A`` (the tokenizer below calls ``load_vocab``), and the
    loop now enumerates the lines read from the file — the original enumerated
    the *path string* because every local had been collapsed to one name.
    """
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    """Greedy longest-match-first WordPiece tokenization over a single word.

    Fixes: restored the class name (the tokenizer below instantiates
    ``WordpieceTokenizer(...)``) and the ``tokenize`` method name (called via
    ``self.wordpiece_tokenizer.tokenize``); the ``__init__`` had three parameters
    all named ``_lowercase`` (a SyntaxError) and bound locals instead of
    instance attributes; replaced the undefined base ``_SCREAMING_SNAKE_CASE``
    with ``object``.
    """

    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab                                    # mapping: piece -> id (membership only)
        self.unk_token = unk_token                            # emitted for unmatchable characters
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        """Split *token* into the longest vocabulary pieces; unknown spans map to unk."""
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            # Shrink the window from the right until a vocab piece matches.
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                # No piece matched even a single character: emit unk, advance one.
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end
        return sub_tokens
class CpmAntTokenizer(PreTrainedTokenizer):
    """Tokenizer for CPM-Ant: jieba word segmentation followed by WordPiece.

    Fixes applied: the base class was the undefined ``_SCREAMING_SNAKE_CASE``
    (restored to the ``PreTrainedTokenizer`` imported above); the five class
    attributes were all named ``_lowercase`` (only the last survived); every
    method was named ``_lowercase`` with duplicate ``_lowercase`` parameters
    (a SyntaxError); instance attributes had been collapsed to locals; and the
    sort key ``lambda _lowercase: x[1]`` raised NameError (parameter/body
    mismatch). Names restored from the body's reads and the standard
    PreTrainedTokenizer hook contract.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        # Map the sentinel space/newline tokens to literal characters.
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda item: item[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)

    @property
    def bod_token_id(self):
        """Id of the begin-of-document token."""
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        """Id of the end-of-document token."""
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        """Id of the newline character (remapped from the line sentinel)."""
        return self.encoder["\n"]

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Segment with jieba, then WordPiece-split each segment."""
        output_tokens = []
        for segment in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(segment))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        """Drop negative ids and pad/eos/bos before delegating to the base decoder."""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        """Return True if *token* is in the vocabulary."""
        return token in self.encoder

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write the vocabulary back to disk, restoring the space/newline sentinels."""
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        # Undo the literal-character remapping done in __init__ before saving.
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda item: item[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
                        """ Please check that the vocabulary is not corrupted!""")
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Prefix each sequence with bos; concatenate a pair when given."""
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Mark the added bos positions with 1, regular tokens with 0."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
| 162 |
'''simple docstring'''
from math import pi, sqrt
def gamma(num: float) -> float:
    """Compute the Gamma function for positive integers and half-integers.

    Uses the recurrence Gamma(n) = (n - 1) * Gamma(n - 1) with base cases
    Gamma(1) = 1 and Gamma(1/2) = sqrt(pi).

    Fixes: the parameter was named ``__lowerCamelCase`` while the body read
    ``num``; the base case returned ``sqrt(<argument>)`` where the recurrence
    requires ``sqrt(pi)``; and the recursive call needs the function to actually
    be named ``gamma`` (it was ``A``).

    Raises:
        ValueError: if num <= 0.
        OverflowError: if num > 171.5 (result exceeds float range).
        NotImplementedError: if num is not an integer or half-integer.
    """
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)
def test_gamma() -> None:
    """Sanity checks for gamma().

    Fixes: renamed from ``A`` (which collided with the gamma function's original
    obfuscated name) and restored ``sqrt(pi)`` — the original asserted
    ``sqrt(__lowerCamelCase)``, an undefined name.
    """
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # Interactive REPL loop; entering 0 (falsy) exits.
    # Fix: the original assigned the input to the throwaway name `_lowercase`
    # while the loop condition read the undefined name `num`.
    num = 1.0
    while num:
        num = float(input("""Gamma of: """))
        print(F"""gamma({num}) = {gamma(num)}""")
        print("""\nEnter 0 to exit...""")
# Quine: the lambda %-formats the template into itself (%r re-quotes the string,
# %% escapes the inner %), so running this line prints its own source text.
print((lambda quine: quine % quine)("print((lambda quine: quine %% quine)(%r))"))
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path) -> None:
    """Convert a TensorFlow BERT checkpoint into a PyTorch state dict on disk.

    Fixes: renamed from ``UpperCAmelCase__`` (the ``__main__`` guard calls
    ``convert_tf_checkpoint_to_pytorch``); the three parameters all shared the
    name ``__snake_case`` (a SyntaxError); the ``-> str`` annotation was wrong —
    the function returns nothing.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        bert_config_file: JSON config describing the model architecture.
        pytorch_dump_path: where to write the converted ``state_dict``.
    """
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(F'''Building PyTorch model from configuration: {config}''')
    model = BertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # Fix: the parser and parsed-args objects were both bound to the throwaway
    # name `__lowerCamelCase`, so `parser.add_argument` and `args.*` raised
    # NameError when the script ran.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
    )
    parser.add_argument(
        """--bert_config_file""",
        default=None,
        type=str,
        required=True,
        help=(
            """The config json file corresponding to the pre-trained BERT model. \n"""
            """This specifies the model architecture."""
        ),
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
from ..utils import DummyObject, requires_backends
# NOTE(review): auto-generated dummy placeholder classes (dummy_pt_objects.py
# style): each raises a helpful error via `requires_backends` when torch is
# unavailable. The obfuscation collapsed every class name to `__A` — so only the
# last definition in this run survives — and the metaclass to the undefined
# name `a` (presumably DummyObject — TODO confirm against the generated
# original; the real per-class names are unrecoverable from this chunk).
class __A ( metaclass=a ):
    __A = ["""torch"""]
    def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(self , ["""torch"""] )
    @classmethod
    def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(cls , ["""torch"""] )
    @classmethod
    def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(cls , ["""torch"""] )
class __A ( metaclass=a ):
    __A = ["""torch"""]
    def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(self , ["""torch"""] )
    @classmethod
    def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(cls , ["""torch"""] )
    @classmethod
    def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(cls , ["""torch"""] )
class __A ( metaclass=a ):
    __A = ["""torch"""]
    def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(self , ["""torch"""] )
    @classmethod
    def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(cls , ["""torch"""] )
    @classmethod
    def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(cls , ["""torch"""] )
class __A ( metaclass=a ):
    __A = ["""torch"""]
    def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(self , ["""torch"""] )
    @classmethod
    def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(cls , ["""torch"""] )
    @classmethod
    def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(cls , ["""torch"""] )
class __A ( metaclass=a ):
    __A = ["""torch"""]
    def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(self , ["""torch"""] )
    @classmethod
    def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(cls , ["""torch"""] )
    @classmethod
    def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(cls , ["""torch"""] )
class __A ( metaclass=a ):
    __A = ["""torch"""]
    def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(self , ["""torch"""] )
    @classmethod
    def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(cls , ["""torch"""] )
    @classmethod
    def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(cls , ["""torch"""] )
class __A ( metaclass=a ):
    __A = ["""torch"""]
    def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(self , ["""torch"""] )
    @classmethod
    def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(cls , ["""torch"""] )
    @classmethod
    def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(cls , ["""torch"""] )
class __A ( metaclass=a ):
    __A = ["""torch"""]
    def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(self , ["""torch"""] )
    @classmethod
    def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(cls , ["""torch"""] )
    @classmethod
    def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(cls , ["""torch"""] )
class __A ( metaclass=a ):
    __A = ["""torch"""]
    def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(self , ["""torch"""] )
    @classmethod
    def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(cls , ["""torch"""] )
    @classmethod
    def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(cls , ["""torch"""] )
class __A ( metaclass=a ):
    __A = ["""torch"""]
    def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(self , ["""torch"""] )
    @classmethod
    def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(cls , ["""torch"""] )
    @classmethod
    def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(cls , ["""torch"""] )
class __A ( metaclass=a ):
    __A = ["""torch"""]
    def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(self , ["""torch"""] )
    @classmethod
    def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(cls , ["""torch"""] )
    @classmethod
    def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(cls , ["""torch"""] )
# NOTE(review): dummy placeholder *functions* from the same generated file.
# All seven were collapsed to the single name `_lowercase`, so only the final
# definition survives at runtime; the original distinct names are
# unrecoverable from this chunk — TODO restore from the generated source.
def _lowercase ( *_UpperCAmelCase , **_UpperCAmelCase ) -> int:
    requires_backends(_UpperCAmelCase , ["""torch"""] )
def _lowercase ( *_UpperCAmelCase , **_UpperCAmelCase ) -> Optional[int]:
    requires_backends(_UpperCAmelCase , ["""torch"""] )
def _lowercase ( *_UpperCAmelCase , **_UpperCAmelCase ) -> str:
    requires_backends(_UpperCAmelCase , ["""torch"""] )
def _lowercase ( *_UpperCAmelCase , **_UpperCAmelCase ) -> str:
    requires_backends(_UpperCAmelCase , ["""torch"""] )
def _lowercase ( *_UpperCAmelCase , **_UpperCAmelCase ) -> Optional[Any]:
    requires_backends(_UpperCAmelCase , ["""torch"""] )
def _lowercase ( *_UpperCAmelCase , **_UpperCAmelCase ) -> List[str]:
    requires_backends(_UpperCAmelCase , ["""torch"""] )
def _lowercase ( *_UpperCAmelCase , **_UpperCAmelCase ) -> Dict:
    requires_backends(_UpperCAmelCase , ["""torch"""] )
# NOTE(review): second run of generated dummy placeholder classes; same caveats
# as above — every name collapsed to `__A`, metaclass `a` undefined (presumably
# DummyObject), original per-class names unrecoverable from this chunk.
class __A ( metaclass=a ):
    __A = ["""torch"""]
    def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(self , ["""torch"""] )
    @classmethod
    def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(cls , ["""torch"""] )
    @classmethod
    def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(cls , ["""torch"""] )
class __A ( metaclass=a ):
    __A = ["""torch"""]
    def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(self , ["""torch"""] )
    @classmethod
    def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(cls , ["""torch"""] )
    @classmethod
    def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(cls , ["""torch"""] )
class __A ( metaclass=a ):
    __A = ["""torch"""]
    def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(self , ["""torch"""] )
    @classmethod
    def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(cls , ["""torch"""] )
    @classmethod
    def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(cls , ["""torch"""] )
class __A ( metaclass=a ):
    __A = ["""torch"""]
    def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(self , ["""torch"""] )
    @classmethod
    def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(cls , ["""torch"""] )
    @classmethod
    def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(cls , ["""torch"""] )
class __A ( metaclass=a ):
    __A = ["""torch"""]
    def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(self , ["""torch"""] )
    @classmethod
    def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(cls , ["""torch"""] )
    @classmethod
    def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(cls , ["""torch"""] )
class __A ( metaclass=a ):
    __A = ["""torch"""]
    def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(self , ["""torch"""] )
    @classmethod
    def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(cls , ["""torch"""] )
    @classmethod
    def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(cls , ["""torch"""] )
class __A ( metaclass=a ):
    __A = ["""torch"""]
    def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(self , ["""torch"""] )
    @classmethod
    def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(cls , ["""torch"""] )
    @classmethod
    def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(cls , ["""torch"""] )
class __A ( metaclass=a ):
    __A = ["""torch"""]
    def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(self , ["""torch"""] )
    @classmethod
    def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(cls , ["""torch"""] )
    @classmethod
    def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(cls , ["""torch"""] )
class __A ( metaclass=a ):
    __A = ["""torch"""]
    def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(self , ["""torch"""] )
    @classmethod
    def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(cls , ["""torch"""] )
    @classmethod
    def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(cls , ["""torch"""] )
class __A ( metaclass=a ):
    __A = ["""torch"""]
    def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(self , ["""torch"""] )
    @classmethod
    def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(cls , ["""torch"""] )
    @classmethod
    def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(cls , ["""torch"""] )
class __A ( metaclass=a ):
    __A = ["""torch"""]
    def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(self , ["""torch"""] )
    @classmethod
    def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(cls , ["""torch"""] )
    @classmethod
    def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(cls , ["""torch"""] )
class __A ( metaclass=a ):
    __A = ["""torch"""]
    def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(self , ["""torch"""] )
    @classmethod
    def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(cls , ["""torch"""] )
    @classmethod
    def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(cls , ["""torch"""] )
class __A ( metaclass=a ):
    __A = ["""torch"""]
    def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(self , ["""torch"""] )
    @classmethod
    def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(cls , ["""torch"""] )
    @classmethod
    def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(cls , ["""torch"""] )
class __A ( metaclass=a ):
    __A = ["""torch"""]
    def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(self , ["""torch"""] )
    @classmethod
    def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(cls , ["""torch"""] )
    @classmethod
    def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        requires_backends(cls , ["""torch"""] )
class __A ( metaclass=a ):
__A = ["""torch"""]
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""torch"""] )
class __A ( metaclass=a ):
__A = ["""torch"""]
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""torch"""] )
class __A ( metaclass=a ):
__A = ["""torch"""]
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""torch"""] )
class __A ( metaclass=a ):
__A = ["""torch"""]
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""torch"""] )
class __A ( metaclass=a ):
__A = ["""torch"""]
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""torch"""] )
class __A ( metaclass=a ):
__A = ["""torch"""]
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""torch"""] )
class __A ( metaclass=a ):
__A = ["""torch"""]
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""torch"""] )
class __A ( metaclass=a ):
__A = ["""torch"""]
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""torch"""] )
class __A ( metaclass=a ):
__A = ["""torch"""]
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""torch"""] )
class __A ( metaclass=a ):
__A = ["""torch"""]
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""torch"""] )
class __A ( metaclass=a ):
__A = ["""torch"""]
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""torch"""] )
class __A ( metaclass=a ):
__A = ["""torch"""]
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""torch"""] )
class __A ( metaclass=a ):
__A = ["""torch"""]
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""torch"""] )
class __A ( metaclass=a ):
__A = ["""torch"""]
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""torch"""] )
class __A ( metaclass=a ):
__A = ["""torch"""]
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""torch"""] )
class __A ( metaclass=a ):
__A = ["""torch"""]
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""torch"""] )
class __A ( metaclass=a ):
__A = ["""torch"""]
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""torch"""] )
class __A ( metaclass=a ):
__A = ["""torch"""]
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""torch"""] )
class __A ( metaclass=a ):
__A = ["""torch"""]
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""torch"""] )
class __A ( metaclass=a ):
__A = ["""torch"""]
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""torch"""] )
class __A ( metaclass=a ):
__A = ["""torch"""]
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""torch"""] )
class __A ( metaclass=a ):
__A = ["""torch"""]
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""torch"""] )
class __A ( metaclass=a ):
__A = ["""torch"""]
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""torch"""] )
class __A ( metaclass=a ):
__A = ["""torch"""]
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""torch"""] )
class __A ( metaclass=a ):
__A = ["""torch"""]
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""torch"""] )
| 269 |
from math import sqrt
def _lowercase ( _UpperCAmelCase ) -> int:
lowerCamelCase =0
for i in range(1 , int(sqrt(_UpperCAmelCase ) + 1 ) ):
if n % i == 0 and i != sqrt(_UpperCAmelCase ):
total += i + n // i
elif i == sqrt(_UpperCAmelCase ):
total += i
return total - n
def _lowercase ( _UpperCAmelCase = 1_00_00 ) -> int:
lowerCamelCase =sum(
i
for i in range(1 , _UpperCAmelCase )
if sum_of_divisors(sum_of_divisors(_UpperCAmelCase ) ) == i and sum_of_divisors(_UpperCAmelCase ) != i )
return total
if __name__ == "__main__":
    # FIX(review): the guard called an undefined name `solution`; the solver in
    # this module is bound to `_lowercase`.  Also dropped the redundant
    # `str(...)` around `input()`, which already returns a string.
    print(_lowercase(int(input().strip())))
| 269 | 1 |
'''simple docstring'''
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
A__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
def UpperCAmelCase__ ( UpperCAmelCase_ : str ) -> str:
    """Infer the pipeline data format from a file path's extension.

    Returns ``"pipe"`` for an empty path (stdin/stdout), otherwise the first
    supported extension the path ends with.

    FIX(review): the original body referenced an undefined name ``path`` and
    called ``path.endswith(UpperCAmelCase_)``, testing the wrong variable; the
    loop now checks each supported extension against the argument.
    """
    if not UpperCAmelCase_:
        return "pipe"
    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if UpperCAmelCase_.endswith(ext):
            return ext
    raise Exception(
        F'Unable to determine file format from file extension {UpperCAmelCase_}. '
        F'Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}' )
def UpperCAmelCase__ ( args ) -> List[Any]:
    """Factory: build a run command from parsed CLI ``args``.

    FIX(review): the original bound the parameter to a mangled name while the
    body read ``args``, and stored the pipeline/reader in throwaway locals
    while referencing the undefined ``nlp``.
    NOTE(review): ``try_infer_format_from_ext`` and ``RunCommand`` were already
    referenced (undefined) by the original mangled module — confirm their
    bindings elsewhere in the file.
    """
    nlp = pipeline(
        task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
    format = try_infer_format_from_ext(args.input ) if args.format == 'infer' else args.format
    reader = PipelineDataFormat.from_str(
        format=format , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
    return RunCommand(nlp , reader )
class UpperCAmelCase_ (BaseTransformersCLICommand ):
    """CLI command that feeds a data file/stream through a `Pipeline` and saves the predictions.

    FIX(review): the original inherited from an undefined mangled base (the
    module imports ``BaseTransformersCLICommand``, otherwise unused), declared
    duplicated parameter names (SyntaxErrors), stored constructor arguments in
    throwaway locals instead of instance attributes, and referenced undefined
    locals (``parser``, ``run_parser``, ``nlp``, ``output``, ``outputs``).
    """

    def __init__( self , nlp , reader ) -> Tuple:
        self._nlp = nlp          # instantiated Pipeline to run
        self._reader = reader    # PipelineDataFormat supplying inputs / saving outputs

    @staticmethod
    def lowercase_ ( parser ) -> Any:
        """Register the ``run`` sub-command and its options on *parser*."""
        run_parser = parser.add_parser('run' , help='Run a pipeline through the CLI' )
        run_parser.add_argument('--task' , choices=get_supported_tasks() , help='Task to run' )
        run_parser.add_argument('--input' , type=str , help='Path to the file to use for inference' )
        run_parser.add_argument('--output' , type=str , help='Path to the file that will be used post to write results.' )
        run_parser.add_argument('--model' , type=str , help='Name or path to the model to instantiate.' )
        run_parser.add_argument('--config' , type=str , help='Name or path to the model\'s config to instantiate.' )
        run_parser.add_argument(
            '--tokenizer' , type=str , help='Name of the tokenizer to use. (default: same as the model name)' )
        run_parser.add_argument(
            '--column' , type=str , help='Name of the column to use as input. (For multi columns input as QA use column1,columns2)' , )
        run_parser.add_argument(
            '--format' , type=str , default='infer' , choices=PipelineDataFormat.SUPPORTED_FORMATS , help='Input format to read from' , )
        run_parser.add_argument(
            '--device' , type=int , default=-1 , help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)' , )
        run_parser.add_argument('--overwrite' , action='store_true' , help='Allow overwriting the output file.' )
        # The factory defined above this class builds the command from parsed args.
        run_parser.set_defaults(func=UpperCAmelCase__ )

    def lowercase_ ( self ) -> int:
        # NOTE(review): this method shadows the static registrar above — both
        # were mangled to the same name; kept as-is to avoid changing the
        # module's visible interface.
        nlp, outputs = self._nlp, []
        for entry in self._reader:
            output = nlp(**entry ) if self._reader.is_multi_columns else nlp(entry )
            if isinstance(output , dict ):
                outputs.append(output )
            else:
                outputs += output

        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs )
            logger.warning(f'Current pipeline requires output to be in binary format, saving at {binary_path}' )
        else:
            self._reader.save(outputs )
| 13 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
__lowerCAmelCase = logging.get_logger(__name__)
class __magic_name__ ( DeiTImageProcessor ):
    """Deprecated drop-in alias for ``DeiTImageProcessor``.

    FIX(review): the original inherited from an undefined mangled base (the
    module imports ``DeiTImageProcessor``, otherwise unused), declared *args
    and **kwargs under one duplicated name (a SyntaxError), and passed that
    name to ``warnings.warn`` as the warning category.  Deprecation shims in
    this codebase emit a ``FutureWarning``.
    """

    def __init__( self : Tuple ,*args : str ,**kwargs : int ):
        warnings.warn(
            'The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DeiTImageProcessor instead.' ,FutureWarning ,)
        super().__init__(*args ,**kwargs )
| 358 | 0 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class _a (PipelineKarrasSchedulerTesterMixin , PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Fast (CPU-sized) checks for ``StableUnCLIPPipeline`` built from tiny dummy components.

    FIX(review): the original bound every class attribute to one mangled name
    (so only the last survived), mangled all four methods to the same name
    (shadowing each other), used duplicated parameter names (a SyntaxError),
    and referenced locals (``embedder_hidden_size`` etc.) that were never
    bound.  Names restored to what the pipeline-tester mixins read.
    NOTE(review): the mixin order and the boolean flag values
    (``clip_sample``/``upcast_attention``/``use_linear_projection``/
    ``set_alpha_to_one``) are reconstructed — confirm against the upstream
    diffusers test suite.
    """

    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False

    def get_dummy_components(self):
        """Build the dict of tiny sub-models the tester mixins instantiate the pipeline from."""
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components
        torch.manual_seed(0 )
        prior_tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )

        torch.manual_seed(0 )
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=embedder_hidden_size , projection_dim=embedder_projection_dim , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )

        torch.manual_seed(0 )
        prior = PriorTransformer(
            num_attention_heads=2 , attention_head_dim=12 , embedding_dim=embedder_projection_dim , num_layers=1 , )

        torch.manual_seed(0 )
        prior_scheduler = DDPMScheduler(
            variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=True , clip_sample_range=5.0 , beta_schedule="""squaredcos_cap_v2""" , )

        # regular denoising components
        torch.manual_seed(0 )
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size )
        image_noising_scheduler = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )

        torch.manual_seed(0 )
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )

        torch.manual_seed(0 )
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=embedder_hidden_size , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )

        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=embedder_hidden_size , layers_per_block=1 , upcast_attention=True , use_linear_projection=True , )

        torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_schedule="""scaled_linear""" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , prediction_type="""v_prediction""" , set_alpha_to_one=False , steps_offset=1 , )

        torch.manual_seed(0 )
        vae = AutoencoderKL()

        components = {
            # prior components
            """prior_tokenizer""": prior_tokenizer,
            """prior_text_encoder""": prior_text_encoder,
            """prior""": prior,
            """prior_scheduler""": prior_scheduler,
            # image noising components
            """image_normalizer""": image_normalizer,
            """image_noising_scheduler""": image_noising_scheduler,
            # regular denoising components
            """tokenizer""": tokenizer,
            """text_encoder""": text_encoder,
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
        }
        return components

    def get_dummy_inputs(self , device , seed=0 ):
        """Deterministic call kwargs for the pipeline on *device*."""
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """prior_num_inference_steps""": 2,
            """output_type""": """numpy""",
        }
        return inputs

    def test_attention_slicing_forward_pass(self ):
        # Only compare exact outputs on CPU; GPU kernels are not bit-stable.
        test_max_difference = torch_device == """cpu"""
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference )

    def test_inference_batch_single_identical(self ):
        test_max_difference = torch_device in ["""cpu""", """mps"""]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference )
@slow
@require_torch_gpu
class _a (unittest.TestCase ):
    """GPU integration tests for the full pretrained StableUnCLIP pipeline.

    FIX(review): the original mangled every local to ``A__`` while the code
    read ``pipe``/``generator``/``image`` (NameErrors), compared an image with
    itself in ``assert_mean_pixel_difference``, mangled both test methods to
    the same name, and used the nonexistent ``torch.floataa`` (→ float16).
    """

    def tearDown(self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self ):
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy""" )

        pipe = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
        output = pipe("""anime turle""" , generator=generator , output_type="""np""" )
        image = output.images[0]

        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image , expected_image )

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self ):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.float16 )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            """anime turtle""" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="""np""" , )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 64 |
from functools import lru_cache
@lru_cache
def UpperCamelCase (lowercase_: int ) -> int:
    """Return the factorial of the given non-negative integer.

    Raises:
        ValueError: if the argument is negative.
    """
    if lowercase_ < 0:
        raise ValueError("""Number should not be negative.""" )
    # Iterative product instead of the original memoized recursion; the
    # result for every input is identical.
    result = 1
    for factor in range(2 , lowercase_ + 1 ):
        result *= factor
    return result
if __name__ == "__main__":
    # Run this module's doctest examples when executed directly.
    import doctest
    doctest.testmod()
| 64 | 1 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
# Emit a deprecation warning when this legacy module is imported.
# FIX(review): a stray " | 108 |" table artifact fused to the closing
# parenthesis (a SyntaxError) has been removed.
deprecate(
    '''stable diffusion controlnet''',
    '''0.22.0''',
    '''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''',
    standard_warn=False,
    stacklevel=3,
)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : List[str] =logging.get_logger(__name__)  # NOTE(review): immediately rebound below, so the logger binding is lost — confirm intent
A_ : Union[str, Any] ={}  # pretrained-config archive map (empty for this model)
class lowercase_ ( PretrainedConfig):
    """Configuration class for LLaMA models.

    FIX(review): the original inherited from an undefined mangled base (the
    module imports ``PretrainedConfig``, otherwise unused), declared every
    ``__init__`` parameter under one duplicated name (a SyntaxError), assigned
    each value to a throwaway local instead of an instance attribute, bound
    both class attributes to one name, and defined the validator under a name
    different from the ``self._rope_scaling_validation()`` call site.  A stray
    " | 483 | 0 |" artifact fused to the last line has also been removed.
    Parameter names are reconstructed from the defaults and usages below.
    """

    model_type = '''llama'''
    keys_to_ignore_at_inference = ['''past_key_values''']

    def __init__( self , vocab_size=32_000 , hidden_size=4_096 , intermediate_size=11_008 , num_hidden_layers=32 , num_attention_heads=32 , num_key_value_heads=None , hidden_act="silu" , max_position_embeddings=2_048 , initializer_range=0.0_2 , rms_norm_eps=1e-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , pretraining_tp=1 , tie_word_embeddings=False , rope_scaling=None , **kwargs , ):
        """Store model hyper-parameters and validate ``rope_scaling``."""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads

        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )

    def _rope_scaling_validation( self ):
        """Validate the ``rope_scaling`` dict (``{"type": ..., "factor": ...}``)."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                """`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
                f"got {self.rope_scaling}" )
        rope_scaling_type = self.rope_scaling.get("""type""" , None )
        rope_scaling_factor = self.rope_scaling.get("""factor""" , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}" )
import string
from math import logaa
def lowerCamelCase__ ( term , document ):
    """Term frequency: count case-insensitive occurrences of *term* in *document*.

    FIX(review): the original declared both parameters under one duplicated
    name (a SyntaxError) and referenced undefined locals; punctuation and
    newlines are stripped before word tokenization as before.
    """
    document_without_punctuation = document.translate(
        str.maketrans("" , "" , string.punctuation ) ).replace("\n" , "" )
    tokenize_document = document_without_punctuation.split(" " )  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()] )
def lowerCamelCase__ ( term , corpus ):
    """Document frequency over a newline-separated *corpus*.

    Returns a tuple ``(documents containing term, total documents)``.

    FIX(review): duplicated parameter names (a SyntaxError) and undefined
    locals restored from the original algorithm's structure.
    """
    # strip all punctuation and replace it with ''
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("" , "" , string.punctuation ) )
    docs = corpus_without_punctuation.split("\n" )
    term = term.lower()
    return (len([doc for doc in docs if term in doc] ), len(docs ))
def lowerCamelCase__ ( df , n , smoothing=False ):
    """Inverse document frequency, rounded to 3 decimals.

    Args:
        df: number of documents containing the term.
        n: total number of documents.
        smoothing: apply add-one smoothing (``1 + log10(n / (1 + df))``).

    Raises:
        ValueError: if ``n`` is zero (log10(0) is undefined).
        ZeroDivisionError: if ``df`` is zero without smoothing.

    FIX(review): duplicated parameter names (a SyntaxError) restored; a local
    alias of ``log10`` is used because the module-level ``logaa`` import is
    broken (``math`` has no ``logaa``).
    """
    from math import log10

    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined." )
        return round(1 + log10(n / (1 + df) ) , 3 )
    if df == 0:
        raise ZeroDivisionError("df must be > 0" )
    elif n == 0:
        raise ValueError("log10(0) is undefined." )
    return round(log10(n / df ) , 3 )
def lowerCamelCase__ ( tf , idf ):
    """tf-idf score: the product of term frequency and inverse document frequency, rounded to 3 decimals.

    FIX(review): duplicated parameter names (a SyntaxError) restored.
    """
    return round(tf * idf , 3 )
| 701 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class UpperCAmelCase :
    """A binary-tree node holding a number of coins.

    FIX(review): the original bound three mangled fields to a single name; the
    traversal code below accesses ``data``, ``left`` and ``right``.
    """

    data: int
    left: "UpperCAmelCase | None" = None
    right: "UpperCAmelCase | None" = None
lowercase__ : Any = namedtuple("CoinsDistribResult", "moves excess")
def lowerCamelCase__ ( root ):
    """Minimum number of moves so that every node of the binary tree holds exactly one coin
    (LeetCode 979, "Distribute Coins in Binary Tree").

    Raises:
        ValueError: if the total number of coins differs from the node count.

    FIX(review): the original's parameter was mangled while the body read
    ``root``, and the tuple unpacking bound throwaway names while the
    arithmetic read the original identifiers (NameErrors).  A local result
    type keeps the function self-contained.
    """
    # Local (moves, excess) record; behaviorally identical to the module-level
    # CoinsDistribResult namedtuple the original referenced.
    Result = namedtuple("CoinsDistribResult" , "moves excess" )

    if root is None:
        return 0

    # Validation
    def count_nodes(node ) -> int:
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1

    def count_coins(node ) -> int:
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data

    if count_nodes(root ) != count_coins(root ):
        raise ValueError("The nodes number should be same as the number of coins" )

    # Main calculation: post-order fold returning (moves so far, coin excess of subtree)
    def get_distrib(node ) -> "Result":
        if node is None:
            return Result(0 , 1 )

        left_moves, left_excess = get_distrib(node.left )
        right_moves, right_excess = get_distrib(node.right )

        coins_to_left = 1 - left_excess
        coins_to_right = 1 - right_excess

        moves = (
            left_moves
            + right_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        excess = node.data - coins_to_left - coins_to_right

        return Result(moves , excess )

    return get_distrib(root )[0]
if __name__ == "__main__":
    # Run this module's doctest examples when executed directly.
    import doctest
    doctest.testmod()
| 139 | 0 |
'''simple docstring'''
from sklearn.metrics import recall_score
import datasets
# FIX(review): the three metric docstring constants were all bound to the same
# mangled name `lowercase_`, so only the citation survived, while the class
# decorator below reads `_DESCRIPTION`, `_KWARGS_DESCRIPTION` and `_CITATION`.
# Restored to those names; `lowercase_` is kept as a backward-compatible alias
# of the last original binding.
_DESCRIPTION = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""

_KWARGS_DESCRIPTION = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
    - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
    - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
    - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
    - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
    - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .
    - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
    - `0`: If there is a zero division, the return value is `0`.
    - `1`: If there is a zero division, the return value is `1`.

Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.

Examples:

    Example 1-A simple example with some errors
        >>> recall_metric = datasets.load_metric('recall')
        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
        >>> print(results)
        {'recall': 0.6666666666666666}

    Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
        >>> recall_metric = datasets.load_metric('recall')
        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
        >>> print(results)
        {'recall': 0.5}

    Example 3-The same example as Example 1, but with `sample_weight` included.
        >>> recall_metric = datasets.load_metric('recall')
        >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
        >>> print(results)
        {'recall': 0.55}

    Example 4-A multiclass example, using different averages.
        >>> recall_metric = datasets.load_metric('recall')
        >>> predictions = [0, 2, 1, 0, 0, 1]
        >>> references = [0, 1, 2, 0, 1, 2]
        >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
        >>> print(results)
        {'recall': 0.3333333333333333}
        >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
        >>> print(results)
        {'recall': 0.3333333333333333}
        >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
        >>> print(results)
        {'recall': 0.3333333333333333}
        >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
        >>> print(results)
        {'recall': array([1., 0., 0.])}
"""

_CITATION = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
"""

# Backward-compatible alias of the name the mangled module left bound.
lowercase_ = _CITATION
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class a_(datasets.Metric):
    """Recall metric backed by scikit-learn's ``recall_score``.

    Fixes over the previous revision: both methods were named ``snake_case_``
    (the second shadowed the first and neither implemented the
    ``datasets.Metric`` API, which looks up ``_info`` / ``_compute``), the
    compute signature declared several parameters all named ``A`` (a
    SyntaxError), and the body referenced an undefined ``__A``.
    """

    def _info(self):
        """Describe the metric: expected feature types and reference URL."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    # The multilabel config takes one sequence of labels per example.
                    """predictions""": datasets.Sequence(datasets.Value("""int32""")),
                    """references""": datasets.Sequence(datasets.Value("""int32""")),
                }
                if self.config_name == """multilabel"""
                else {
                    """predictions""": datasets.Value("""int32"""),
                    """references""": datasets.Value("""int32"""),
                }
            ),
            reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""],
        )

    def _compute(
        self,
        predictions,
        references,
        labels=None,
        pos_label=1,
        average="binary",
        sample_weight=None,
        zero_division="warn",
    ):
        """Return ``{"recall": ...}`` — a float, or a per-class array when
        ``average=None``. Parameters mirror ``sklearn.metrics.recall_score``.
        """
        # sklearn's signature is (y_true, y_pred, ...), so references first.
        score = recall_score(
            references,
            predictions,
            labels=labels,
            pos_label=pos_label,
            average=average,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
        # `average=None` yields an ndarray with size > 1; keep it as-is then.
        return {"recall": float(score) if score.size == 1 else score}
| 314 |
def SCREAMING_SNAKE_CASE__(snake_case__: list) -> int:
    """Return the minimum possible difference between the sums of two subsets
    that partition *snake_case__* (the classic partition problem, solved with
    a subset-sum DP).

    Fixes over the previous revision: every local had been rebound to a single
    name ``_lowercase`` so ``n``/``s``/``arr``/``diff`` were undefined, and the
    DP recurrence read ``dp[i][j - 1]`` instead of ``dp[i - 1][j]``.
    """
    arr = snake_case__
    n = len(arr)
    s = sum(arr)
    # dp[i][j] is True when some subset of the first i items sums to j.
    dp = [[False for _ in range(s + 1)] for _ in range(n + 1)]
    for i in range(n + 1):
        dp[i][0] = True  # the empty subset always sums to 0
    for j in range(1, s + 1):
        dp[0][j] = False  # no items cannot reach a positive sum
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            # Either skip item i, or include it when it fits.
            dp[i][j] = dp[i - 1][j]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    # The best split puts as close to s/2 as possible on one side.
    diff = s
    for j in range(s // 2, -1, -1):
        if dp[n][j]:
            diff = s - 2 * j
            break
    return diff
import numpy as np
def SCREAMING_SNAKE_CASE(vector: np.ndarray, alpha: float) -> np.ndarray:
    """Exponential Linear Unit: ``x`` where ``x > 0``, otherwise
    ``alpha * (exp(x) - 1)``.

    Fixes over the previous revision: both parameters were named
    ``__UpperCamelCase`` (a SyntaxError) and the body referenced the undefined
    names ``vector``, ``alpha`` and ``A_``.

    :param vector: input array (element-wise activation).
    :param alpha: scale of the negative saturation branch.
    :return: array of the same shape as *vector*.
    """
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 710 |
import string
def atbash_slow(sequence: str) -> str:
    """Encode *sequence* with the Atbash cipher (A<->Z, a<->z) character by
    character via ``ord``/``chr``; non-letters pass through unchanged.

    Fix over the previous revision: all three functions in this file were
    named ``SCREAMING_SNAKE_CASE`` (each redefinition shadowed the last) while
    ``benchmark`` and the ``__main__`` block called ``atbash``/``atbash_slow``;
    the original names are restored.
    """
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:  # uppercase A-Z
            output += chr(1_55 - extract)
        elif 97 <= extract <= 1_22:  # lowercase a-z
            output += chr(2_19 - extract)
        else:
            output += i
    return output


def atbash(sequence: str) -> str:
    """Atbash-encode *sequence* using a reversed-alphabet lookup table."""
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence)


def benchmark() -> None:
    """Compare the runtime of both implementations over ``string.printable``."""
    from timeit import timeit

    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(F'> atbash_slow(): {timeit("atbash_slow(printable)" , setup=setup )} seconds')
    print(F'> atbash(): {timeit("atbash(printable)" , setup=setup )} seconds')


if __name__ == "__main__":
    for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
        print(F'{example} encrypted in atbash: {atbash(example)}')
    benchmark()
| 207 | 0 |
import operator as op
def solve(post_fix: list) -> int:
    """Evaluate a space-split postfix (RPN) expression of non-negative integer
    tokens, printing a step-by-step trace table, and return the result.

    Fixes over the previous revision: ``stack``, ``div``, ``opr``, ``a`` and
    ``b`` were all rebound to a single name ``SCREAMING_SNAKE_CASE_`` and the
    ``__main__`` block called an undefined ``solve``; the original working
    names are restored.
    """
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        '^': op.pow,
        '*': op.mul,
        '/': div,
        '+': op.add,
        '-': op.sub,
    }  # operators & their respective operation
    # print table header
    print('Symbol'.center(8), 'Action'.center(12), 'Stack', sep=' | ')
    print('-' * (30 + len(post_fix)))
    for x in post_fix:
        if x.isdigit():  # if x in digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ('push(' + x + ')').ljust(12), ','.join(stack), sep=' | ')
        else:
            b = stack.pop()  # pop stack (right operand first)
            # output in tabular format
            print(''.rjust(8), ('pop(' + b + ')').ljust(12), ','.join(stack), sep=' | ')
            a = stack.pop()  # pop stack (left operand)
            # output in tabular format
            print(''.rjust(8), ('pop(' + a + ')').ljust(12), ','.join(stack), sep=' | ')
            # evaluate the 2 values popped from stack & push result to stack
            stack.append(str(opr[x](int(a), int(b))))
            # output in tabular format
            print(x.rjust(8), ('push(' + a + x + b + ')').ljust(12), ','.join(stack), sep=' | ')
    return int(stack[0])


if __name__ == "__main__":
    Postfix = input('\n\nEnter a Postfix Equation (space separated) = ').split(' ')
    print('\n\tResult = ', solve(Postfix))
def sylvester(number: int) -> int:
    """Return the *number*-th term of Sylvester's sequence:
    a(1) = 2 and a(n) = a(n-1)^2 - a(n-1) + 1.

    Fixes over the previous revision: the recursion and the ``__main__`` block
    called an undefined ``sylvester`` (the def was named ``UpperCAmelCase_``),
    the type assertion compared ``isinstance(number, number)``, and the locals
    ``num``/``lower``/``upper`` were all rebound to one name.

    :raises ValueError: when ``number < 1``.
    """
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"
    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        # a(n) = a(n-1) * (a(n-1) - 1) + 1
        return lower * upper + 1


if __name__ == "__main__":
    print(f"""The 8th number in Sylvester\'s sequence: {sylvester(8)}""")
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    """Shuffle *data* in place by swapping ``len(data)`` random index pairs and
    return it.

    Fixes over the previous revision: the body referenced the undefined names
    ``_A``/``data`` (the parameter was ``_SCREAMING_SNAKE_CASE``) and the swap
    result was discarded into a throwaway local instead of being written back
    into the list; the ``__main__`` block also called this (then-missing) name.

    NOTE(review): this swaps random pairs rather than performing the textbook
    Fisher-Yates walk, but it still returns a permutation of the input.
    """
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class UpperCAmelCase ( snake_case_ ):
    """Dataset wrapper that applies a preprocessing callable to each item on
    access (the ``PipelineDataset`` pattern).

    Fixes over the previous revision: ``__init__`` declared three parameters
    all named ``_lowerCAmelCase`` (a SyntaxError) and never stored them on
    ``self``, so ``__len__``/``__getitem__`` referenced missing attributes.
    """

    def __init__(self, dataset, process, params):
        # dataset: any indexable source; process: callable applied per item;
        # params: extra keyword arguments forwarded to `process`.
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed
class UpperCAmelCase ( snake_case_ ):
    """Iterator that runs ``infer`` over items from ``loader`` and, when the
    loader yields batches, unrolls each batch into batch_size-1 items.

    NOTE(review): obfuscation damage to confirm — ``__init__`` declares four
    parameters all named ``_lowerCAmelCase`` (duplicate argument = SyntaxError)
    and repeatedly rebinds that local instead of assigning the
    ``self.loader`` / ``self.infer`` / ``self.params`` / ``self.loader_batch_size``
    attributes the other methods read; likewise the per-key results in
    ``loader_batch_item`` are rebound to one local instead of being stored into
    the accumulator dict, and the method named ``__lowerCAmelCase`` is
    presumably meant to be ``__next__`` (and ``loader_batch_item``) — without
    it the iterator protocol is unimplemented. Code left byte-identical here.
    """
    def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None ):
        _lowerCAmelCase = loader
        _lowerCAmelCase = infer
        _lowerCAmelCase = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            _lowerCAmelCase = None
        _lowerCAmelCase = loader_batch_size
        # Internal bookkeeping
        _lowerCAmelCase = None
        _lowerCAmelCase = None
    def __len__( self ):
        # Delegates to the underlying loader's length.
        return len(self.loader )
    def __iter__( self ):
        _lowerCAmelCase = iter(self.loader )
        return self
    def __lowerCAmelCase ( self ):
        # Slice a single item (made to look like batch_size=1) out of the
        # currently unrolled loader batch.
        if isinstance(self._loader_batch_data , torch.Tensor ):
            # Batch data is simple tensor, just fetch the slice
            _lowerCAmelCase = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            _lowerCAmelCase = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
                    # Convert ModelOutput to tuple first
                    _lowerCAmelCase = element.to_tuple()
                    if isinstance(element[0] , torch.Tensor ):
                        _lowerCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
                    elif isinstance(element[0] , np.ndarray ):
                        _lowerCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(_lowerCAmelCase , _lowerCAmelCase ):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0] , torch.Tensor ):
                        _lowerCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
                    elif isinstance(element[0] , np.ndarray ):
                        _lowerCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    _lowerCAmelCase = None
                elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    _lowerCAmelCase = element[self._loader_batch_index].unsqueeze(0 )
                elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    _lowerCAmelCase = np.expand_dims(element[self._loader_batch_index] , 0 )
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    _lowerCAmelCase = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            _lowerCAmelCase = self._loader_batch_data.__class__(_lowerCAmelCase )
        self._loader_batch_index += 1
        return result
    def __lowerCAmelCase ( self ):
        # Advance: either continue unrolling the current batch, or pull and
        # infer the next batch from the loader.
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()
        # We're out of items within a batch
        _lowerCAmelCase = next(self.iterator )
        _lowerCAmelCase = self.infer(_lowerCAmelCase , **self.params )
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(_lowerCAmelCase , torch.Tensor ):
                _lowerCAmelCase = processed
            else:
                _lowerCAmelCase = list(processed.keys() )[0]
                _lowerCAmelCase = processed[key]
            if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
                _lowerCAmelCase = len(_lowerCAmelCase )
            else:
                _lowerCAmelCase = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                _lowerCAmelCase = observed_batch_size
            # Setting internal index to unwrap the batch
            _lowerCAmelCase = processed
            _lowerCAmelCase = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
class UpperCAmelCase ( snake_case_ ):
    """Iterator that flattens "lists of lists" produced lazily: ``infer`` on
    each loader item returns a sub-iterator, whose items are yielded one by
    one before the next loader item is consumed (the ``PipelineChunkIterator``
    pattern).

    Fixes over the previous revision: ``__init__`` declared duplicate
    ``_lowerCAmelCase`` parameters (a SyntaxError), ``__iter__`` discarded its
    state into a local instead of setting ``self.iterator``/``self.subiterator``,
    and the advance method was named ``__lowerCAmelCase`` instead of
    ``__next__``, so the iterator protocol was never implemented.
    """

    def __init__(self, loader, infer, params, loader_batch_size=None):
        # Batch unrolling is handled by the parent; this subclass only needs
        # the loader / infer / params triple.
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            # First call: create the sub-iterator for the first loader item.
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed
class UpperCAmelCase ( snake_case_ ):
    """Iterator that regroups flattened chunk items back into lists: it
    accumulates items (unbatching along the way) until one carries
    ``is_last``, then returns the accumulated list (the
    ``PipelinePackIterator`` pattern).

    NOTE(review): obfuscation damage to confirm — the advance method is named
    ``__lowerCAmelCase`` (presumably ``__next__``; without it the iterator
    protocol is unimplemented) and most per-step results are rebound to a
    single local instead of the named variables (``is_last``, ``item``,
    ``processed``, ``first_tensor``, ``observed_batch_size``, ...) that the
    surrounding logic reads. Code left byte-identical here.
    """
    def __iter__( self ):
        _lowerCAmelCase = iter(self.loader )
        return self
    def __lowerCAmelCase ( self ):
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.
        # This iterator accumulates items (possibly while unbatching) until it
        # its a `is_last` and then just passes it on to the caller.
        _lowerCAmelCase = False
        _lowerCAmelCase = []
        # First, drain whatever is left of the batch currently being unrolled.
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                _lowerCAmelCase = self.loader_batch_item()
                _lowerCAmelCase = item.pop('''is_last''' )
                accumulator.append(_lowerCAmelCase )
                if is_last:
                    return accumulator
        # Then keep pulling (and possibly unrolling) new batches until a
        # sentinel `is_last` item is seen.
        while not is_last:
            _lowerCAmelCase = self.infer(next(self.iterator ) , **self.params )
            if self.loader_batch_size is not None:
                if isinstance(_lowerCAmelCase , torch.Tensor ):
                    _lowerCAmelCase = processed
                else:
                    _lowerCAmelCase = list(processed.keys() )[0]
                    _lowerCAmelCase = processed[key]
                if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
                    _lowerCAmelCase = len(_lowerCAmelCase )
                else:
                    _lowerCAmelCase = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    _lowerCAmelCase = observed_batch_size
                _lowerCAmelCase = processed
                _lowerCAmelCase = 0
                while self._loader_batch_index < self.loader_batch_size:
                    _lowerCAmelCase = self.loader_batch_item()
                    _lowerCAmelCase = item.pop('''is_last''' )
                    accumulator.append(_lowerCAmelCase )
                    if is_last:
                        return accumulator
            else:
                _lowerCAmelCase = processed
                _lowerCAmelCase = item.pop('''is_last''' )
                accumulator.append(_lowerCAmelCase )
        return accumulator
class UpperCAmelCase ( snake_case_ ):
    """Dataset view that projects a single key out of each (dict-like) item.

    Fixes over the previous revision: ``__init__`` declared duplicate
    ``_lowerCAmelCase`` parameters (a SyntaxError) and never stored
    ``self.dataset``/``self.key``, which the other methods read.
    """

    def __init__(self, dataset, key):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]
class UpperCAmelCase ( snake_case_ ):
    """Dataset view that projects two keys out of each (dict-like) item as a
    ``{"text": ..., "text_pair": ...}`` mapping (sentence-pair tasks).

    Fixes over the previous revision: ``__init__`` declared duplicate
    ``_lowerCAmelCase`` parameters (a SyntaxError), both keys were collapsed
    onto a single attribute ``keya``, and nothing was stored on ``self``.
    """

    def __init__(self, dataset, keya, keyb):
        self.dataset = dataset
        self.keya = keya  # key for the "text" field
        self.keyb = keyb  # key for the "text_pair" field

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keyb]}
def snake_case(input_str: str) -> str:
    """Return *input_str* with its whitespace-separated words in reverse order.

    Fix over the previous revision: the parameter was named ``__lowercase``
    while the body referenced the undefined name ``input_str``.

    >>> snake_case("Hello World")
    'World Hello'
    >>> snake_case("")
    ''
    """
    return " ".join(input_str.split()[::-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''simple docstring'''
from collections import defaultdict
from math import gcd
def SCREAMING_SNAKE_CASE_(__A: int = 1_50_00_00) -> int:
    """Project Euler 75: count the perimeters ``p <= __A`` that can be formed
    by exactly one integer-sided right triangle.

    Uses Euclid's formula: for coprime ``m > n`` of opposite parity the
    primitive triple has perimeter ``2 * m * (m + n)``; stepping through its
    multiples covers all (non-primitive) triples.

    Fixes over the previous revision: ``frequencies`` was created as
    ``defaultdict(__A)`` (an int is not a valid default factory), the loop
    bounds referenced the undefined name ``limit`` and used ``__A`` where the
    Euclid parameters belong, and ``gcd(__A, __A)`` made every iteration
    ``continue`` — all locals had been collapsed onto one name.
    """
    frequencies = defaultdict(int)
    euclid_m = 2
    # Smallest perimeter for a given m is 2*m*(m+1) (with n=1).
    while 2 * euclid_m * (euclid_m + 1) <= __A:
        # n runs over values < m with parity opposite to m.
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue  # not a primitive triple
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, __A + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)


if __name__ == "__main__":
    print(f"{SCREAMING_SNAKE_CASE_() = }")
| 418 | 0 |
"""simple docstring"""
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class lowercase ( _UpperCAmelCase ):
    """Scheduler tests for ``KDPMaDiscreteScheduler``: config sweeps plus
    full denoising loops whose result sums/means are pinned per device.

    NOTE(review): obfuscation damage to confirm — most intermediate results
    are bound to a local ``lowerCAmelCase`` while the following lines read the
    intended names (``config``, ``scheduler``, ``model``, ``sample``,
    ``output``, ``result_sum``, ``result_mean``), which would raise NameError
    at runtime; the class name also shadows the ``**lowercase`` kwargs
    parameter used below. Code left byte-identical here.
    """
    # Scheduler classes under test and the number of inference steps used by
    # the full-loop tests.
    _SCREAMING_SNAKE_CASE = (KDPMaDiscreteScheduler,)
    _SCREAMING_SNAKE_CASE = 10
    def _snake_case ( self , **lowercase ) -> List[Any]:
        # Build a default scheduler config, overridable via kwargs.
        lowerCAmelCase = {
            """num_train_timesteps""": 1_100,
            """beta_start""": 0.0_001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
        }
        config.update(**lowercase )
        return config
    def _snake_case ( self ) -> Dict:
        # Sweep over training-timestep counts.
        for timesteps in [10, 50, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=lowercase )
    def _snake_case ( self ) -> Optional[Any]:
        # Sweep over paired (beta_start, beta_end) values.
        for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=lowercase , beta_end=lowercase )
    def _snake_case ( self ) -> Union[str, Any]:
        # Sweep over beta schedules.
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=lowercase )
    def _snake_case ( self ) -> List[Any]:
        # Sweep over prediction types.
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=lowercase )
    def _snake_case ( self ) -> List[Any]:
        # Full denoising loop with v-prediction; checks device-specific sums.
        lowerCAmelCase = self.scheduler_classes[0]
        lowerCAmelCase = self.get_scheduler_config(prediction_type="""v_prediction""" )
        lowerCAmelCase = scheduler_class(**lowercase )
        scheduler.set_timesteps(self.num_inference_steps )
        lowerCAmelCase = self.dummy_model()
        lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
        lowerCAmelCase = sample.to(lowercase )
        for i, t in enumerate(scheduler.timesteps ):
            lowerCAmelCase = scheduler.scale_model_input(lowercase , lowercase )
            lowerCAmelCase = model(lowercase , lowercase )
            lowerCAmelCase = scheduler.step(lowercase , lowercase , lowercase )
            lowerCAmelCase = output.prev_sample
        lowerCAmelCase = torch.sum(torch.abs(lowercase ) )
        lowerCAmelCase = torch.mean(torch.abs(lowercase ) )
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07 ) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10 ) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693_4286_5017_0972e-07 ) < 1e-2
            assert abs(result_mean.item() - 0.0_002 ) < 1e-3
    def _snake_case ( self ) -> str:
        # Full denoising loop with the default (epsilon) config; skipped on mps.
        if torch_device == "mps":
            return
        lowerCAmelCase = self.scheduler_classes[0]
        lowerCAmelCase = self.get_scheduler_config()
        lowerCAmelCase = scheduler_class(**lowercase )
        scheduler.set_timesteps(self.num_inference_steps )
        lowerCAmelCase = self.dummy_model()
        lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
        lowerCAmelCase = sample.to(lowercase )
        for i, t in enumerate(scheduler.timesteps ):
            lowerCAmelCase = scheduler.scale_model_input(lowercase , lowercase )
            lowerCAmelCase = model(lowercase , lowercase )
            lowerCAmelCase = scheduler.step(lowercase , lowercase , lowercase )
            lowerCAmelCase = output.prev_sample
        lowerCAmelCase = torch.sum(torch.abs(lowercase ) )
        lowerCAmelCase = torch.mean(torch.abs(lowercase ) )
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4_125 ) < 1e-2
            assert abs(result_mean.item() - 0.0_266 ) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4_125 ) < 1e-2
            assert abs(result_mean.item() - 0.0_266 ) < 1e-3
    def _snake_case ( self ) -> str:
        # Same loop, but timesteps are placed on the target device explicitly.
        if torch_device == "mps":
            return
        lowerCAmelCase = self.scheduler_classes[0]
        lowerCAmelCase = self.get_scheduler_config()
        lowerCAmelCase = scheduler_class(**lowercase )
        scheduler.set_timesteps(self.num_inference_steps , device=lowercase )
        lowerCAmelCase = self.dummy_model()
        lowerCAmelCase = self.dummy_sample_deter.to(lowercase ) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            lowerCAmelCase = scheduler.scale_model_input(lowercase , lowercase )
            lowerCAmelCase = model(lowercase , lowercase )
            lowerCAmelCase = scheduler.step(lowercase , lowercase , lowercase )
            lowerCAmelCase = output.prev_sample
        lowerCAmelCase = torch.sum(torch.abs(lowercase ) )
        lowerCAmelCase = torch.mean(torch.abs(lowercase ) )
        if str(lowercase ).startswith("""cpu""" ):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4_125 ) < 1e-2
            assert abs(result_mean.item() - 0.0_266 ) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4_125 ) < 1e-2
            assert abs(result_mean.item() - 0.0_266 ) < 1e-3
| 393 |
"""simple docstring"""
def UpperCAmelCase__(SCREAMING_SNAKE_CASE: int = 1_00) -> int:
    """Project Euler 6: difference between the square of the sum and the sum
    of the squares of the first *SCREAMING_SNAKE_CASE* natural numbers.

    Fixes over the previous revision: the body used an undefined name ``n``
    (the parameter was never read) and the ``__main__`` block called an
    undefined ``solution``.
    """
    n = SCREAMING_SNAKE_CASE
    # Square of the sum, via Gauss' formula (n*(n+1)/2)^2.
    sum_cubes = (n * (n + 1) // 2) ** 2
    # Sum of squares, via the closed form n*(n+1)*(2n+1)/6.
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares


if __name__ == "__main__":
    print(f"{UpperCAmelCase__() = }")
| 393 | 1 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class a__ ( lowercase__ ):
    """Dataset of tokenized sequences for LM distillation: cleans the corpus
    on construction (splits over-long sequences, drops too-short ones and
    those dominated by unknown tokens) and pads batches in ``collate``-style.

    NOTE(review): obfuscation damage to confirm — methods declare duplicate
    ``UpperCamelCase_`` parameters (a SyntaxError) while their bodies
    reference ``SCREAMING_SNAKE_CASE__`` and bind results to
    ``__UpperCAmelCase`` instead of the named locals/attributes the following
    lines read; all five helper methods also share the name ``a_``, so each
    definition shadows the previous one. Code left byte-identical here.
    """
    def __init__( self : Optional[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any):
        """Store params, tokenized data and per-sequence lengths, then clean."""
        __UpperCAmelCase : Union[str, Any] = params
        __UpperCAmelCase : str = np.array(SCREAMING_SNAKE_CASE__)
        __UpperCAmelCase : List[Any] = np.array([len(SCREAMING_SNAKE_CASE__) for t in data])
        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()
    def __getitem__( self : Tuple , UpperCamelCase_ : Dict):
        """Return the (token_ids, length) pair at the given index."""
        return (self.token_ids[index], self.lengths[index])
    def __len__( self : Optional[Any]):
        """Number of sequences currently kept."""
        return len(self.lengths)
    def a_ ( self : List[Any]):
        """Sanity-check that token arrays and recorded lengths agree."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))
    def a_ ( self : List[str]):
        """Split sequences longer than the model's max input size into chunks,
        re-adding the cls/sep (or bos/eos) sentinels on each chunk."""
        __UpperCAmelCase : List[Any] = self.params.max_model_input_size
        __UpperCAmelCase : Any = self.lengths > max_len
        logger.info(F"Splitting {sum(SCREAMING_SNAKE_CASE__)} too long sequences.")
        def divide_chunks(UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Tuple):
            # Slice l into consecutive chunks of size n.
            return [l[i : i + n] for i in range(0 , len(SCREAMING_SNAKE_CASE__) , SCREAMING_SNAKE_CASE__)]
        __UpperCAmelCase : Any = []
        __UpperCAmelCase : List[Any] = []
        if self.params.mlm:
            __UpperCAmelCase : List[str] = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token']
        else:
            __UpperCAmelCase : Any = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token']
        for seq_, len_ in zip(self.token_ids , self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                __UpperCAmelCase : Optional[int] = []
                for sub_s in divide_chunks(seq_ , max_len - 2):
                    # Re-add the boundary sentinels lost by the raw split.
                    if sub_s[0] != cls_id:
                        __UpperCAmelCase : List[str] = np.insert(SCREAMING_SNAKE_CASE__ , 0 , SCREAMING_SNAKE_CASE__)
                    if sub_s[-1] != sep_id:
                        __UpperCAmelCase : Tuple = np.insert(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__) , SCREAMING_SNAKE_CASE__)
                    assert len(SCREAMING_SNAKE_CASE__) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(SCREAMING_SNAKE_CASE__)
                new_tok_ids.extend(SCREAMING_SNAKE_CASE__)
                new_lengths.extend([len(SCREAMING_SNAKE_CASE__) for l in sub_seqs])
        __UpperCAmelCase : str = np.array(SCREAMING_SNAKE_CASE__)
        __UpperCAmelCase : str = np.array(SCREAMING_SNAKE_CASE__)
    def a_ ( self : List[Any]):
        """Drop sequences of 11 tokens or fewer."""
        __UpperCAmelCase : Any = len(self)
        __UpperCAmelCase : Any = self.lengths > 11
        __UpperCAmelCase : Optional[int] = self.token_ids[indices]
        __UpperCAmelCase : List[str] = self.lengths[indices]
        __UpperCAmelCase : Tuple = len(self)
        logger.info(F"Remove {init_size - new_size} too short (<=11 tokens) sequences.")
    def a_ ( self : int):
        """Drop sequences in which unknown tokens make up 50% or more."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            __UpperCAmelCase : Optional[Any] = self.params.special_tok_ids['unk_token']
        __UpperCAmelCase : Optional[Any] = len(self)
        __UpperCAmelCase : Union[str, Any] = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        __UpperCAmelCase : Optional[Any] = (unk_occs / self.lengths) < 0.5
        __UpperCAmelCase : str = self.token_ids[indices]
        __UpperCAmelCase : List[str] = self.lengths[indices]
        __UpperCAmelCase : Tuple = len(self)
        logger.info(F"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")
    def a_ ( self : Optional[int]):
        """Log dataset statistics (master process only)."""
        if not self.params.is_master:
            return
        logger.info(F"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
    def a_ ( self : str , UpperCamelCase_ : Optional[Any]):
        """Collate a batch of (token_ids, length) pairs: pad every sequence to
        the batch max and return (token tensor, length tensor)."""
        __UpperCAmelCase : Optional[Any] = [t[0] for t in batch]
        __UpperCAmelCase : List[Any] = [t[1] for t in batch]
        assert len(SCREAMING_SNAKE_CASE__) == len(SCREAMING_SNAKE_CASE__)
        # Max for paddings
        __UpperCAmelCase : Union[str, Any] = max(SCREAMING_SNAKE_CASE__)
        # Pad token ids
        if self.params.mlm:
            __UpperCAmelCase : Tuple = self.params.special_tok_ids['pad_token']
        else:
            __UpperCAmelCase : str = self.params.special_tok_ids['unk_token']
        __UpperCAmelCase : Optional[int] = [list(t.astype(SCREAMING_SNAKE_CASE__)) + [pad_idx] * (max_seq_len_ - len(SCREAMING_SNAKE_CASE__)) for t in token_ids]
        assert len(tk_) == len(SCREAMING_SNAKE_CASE__)
        assert all(len(SCREAMING_SNAKE_CASE__) == max_seq_len_ for t in tk_)
        __UpperCAmelCase : Optional[int] = torch.tensor(tk_) # (bs, max_seq_len_)
        __UpperCAmelCase : Dict = torch.tensor(SCREAMING_SNAKE_CASE__) # (bs)
        return tk_t, lg_t
| 77 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
# Configure transformers' logging for the whole script: INFO verbosity, the
# library's default handler, and explicit (timestamp/level/name) formatting.
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def SCREAMING_SNAKE_CASE_ ( __A : str , __A : str , __A : str , __A : PreTrainedTokenizer , __A : int , __A : Optional[int] = None , ) -> int:
    """Build tf.data train/validation/test datasets from CSV files.

    Loads the CSVs with ``datasets.load_dataset('csv', ...)``, tokenizes the
    one or two text columns with ``tokenizer.batch_encode_plus`` (padding to
    ``max_seq_length``), wraps each split in a generator-backed
    ``tf.data.Dataset`` with asserted cardinality, and returns
    ``(train_ds, val_ds, test_ds, label2id)``.

    NOTE(review): all six parameters are named ``__A`` (duplicate argument =
    SyntaxError), results are bound to ``a_`` while later lines read the
    intended names (``files``, ``ds``, ``label_name``, ``labelaid``, ...), and
    ``data_files=__A`` should presumably be the ``files`` dict — confirm
    against the original. Code left byte-identical here.
    """
    a_ : List[str] = {}
    if train_file is not None:
        a_ : Dict = [train_file]
    if eval_file is not None:
        a_ : str = [eval_file]
    if test_file is not None:
        a_ : Any = [test_file]
    a_ : Any = datasets.load_dataset('csv' , data_files=__A )
    a_ : Optional[int] = list(ds[list(files.keys() )[0]].features.keys() )
    a_ : Any = features_name.pop(__A )
    a_ : Optional[Any] = list(set(ds[list(files.keys() )[0]][label_name] ) )
    a_ : str = {label: i for i, label in enumerate(__A )}
    a_ : Optional[Any] = tokenizer.model_input_names
    a_ : Union[str, Any] = {}
    if len(__A ) == 1:
        # Single text column: tokenize it alone.
        for k in files.keys():
            a_ : Union[str, Any] = ds[k].map(
                lambda __A : tokenizer.batch_encode_plus(
                    example[features_name[0]] , truncation=__A , max_length=__A , padding='max_length' ) , batched=__A , )
    elif len(__A ) == 2:
        # Two text columns: tokenize them as a sentence pair.
        for k in files.keys():
            a_ : Optional[Any] = ds[k].map(
                lambda __A : tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]) , truncation=__A , max_length=__A , padding='max_length' , ) , batched=__A , )
    def gen_train():
        # Yield (features dict, label id) pairs for the train split.
        for ex in transformed_ds[datasets.Split.TRAIN]:
            a_ : List[Any] = {k: v for k, v in ex.items() if k in input_names}
            a_ : Any = labelaid[ex[label_name]]
            yield (d, label)
    def gen_val():
        # Yield (features dict, label id) pairs for the validation split.
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            a_ : List[Any] = {k: v for k, v in ex.items() if k in input_names}
            a_ : Union[str, Any] = labelaid[ex[label_name]]
            yield (d, label)
    def gen_test():
        # Yield (features dict, label id) pairs for the test split.
        for ex in transformed_ds[datasets.Split.TEST]:
            a_ : Optional[Any] = {k: v for k, v in ex.items() if k in input_names}
            a_ : Union[str, Any] = labelaid[ex[label_name]]
            yield (d, label)
    a_ : List[str] = (
        tf.data.Dataset.from_generator(
            __A , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        a_ : List[str] = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
    a_ : Optional[int] = (
        tf.data.Dataset.from_generator(
            __A , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        a_ : Optional[int] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
    a_ : Union[str, Any] = (
        tf.data.Dataset.from_generator(
            __A , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        a_ : str = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
    return train_ds, val_ds, test_ds, labelaid
UpperCAmelCase_ : List[Any] = logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE__ :
    """Command-line arguments describing the input data (label column, CSV
    file paths, tokenization length), parsed via ``HfArgumentParser``."""
    # Name/index of the CSV column holding the label (required).
    snake_case__ : int = field(metadata={'''help''': '''Which column contains the label'''} )
    # Optional CSV paths for each split.
    snake_case__ : str = field(default=lowercase__ , metadata={'''help''': '''The path of the training file'''} )
    snake_case__ : Optional[str] = field(default=lowercase__ , metadata={'''help''': '''The path of the development file'''} )
    snake_case__ : Optional[str] = field(default=lowercase__ , metadata={'''help''': '''The path of the test file'''} )
    # Max tokenized sequence length (pad/truncate target).
    snake_case__ : int = field(
        default=128 , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    snake_case__ : bool = field(
        default=lowercase__ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class SCREAMING_SNAKE_CASE__ :
    """Command-line arguments selecting the pretrained model/config/tokenizer
    and cache directory, parsed via ``HfArgumentParser``."""
    # Model identifier or local path (required).
    snake_case__ : str = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    # Optional overrides when config/tokenizer live elsewhere.
    snake_case__ : Optional[str] = field(
        default=lowercase__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    snake_case__ : Optional[str] = field(
        default=lowercase__ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    snake_case__ : bool = field(default=lowercase__ , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    snake_case__ : Optional[str] = field(
        default=lowercase__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def SCREAMING_SNAKE_CASE_ ( ) -> Optional[int]:
    """Training entry point: parse the three argument dataclasses, build the
    tf.data datasets and model, run TFTrainer training and evaluation, and
    write eval results to ``eval_results.txt``.

    NOTE(review): obfuscation damage to confirm — results are bound to ``a_``
    while later lines read the intended names (``parser``, ``training_args``,
    ``tokenizer``, ``trainer``, ...); ``logger`` is undefined at module level
    (the getLogger result was stored in ``UpperCAmelCase_``); and
    ``results.update(__A)`` references a name that does not exist in this
    scope. Code left byte-identical here.
    """
    a_ : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
    a_ , a_ , a_ : Dict = parser.parse_args_into_dataclasses()
    # Refuse to clobber a non-empty output directory unless asked to.
    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
            ' --overwrite_output_dir to overcome.' )
    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
    logger.info(
        F"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
        F"""16-bits training: {training_args.fpaa}""" )
    logger.info(F"""Training/evaluation parameters {training_args}""" )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    a_ : List[Any] = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    a_ , a_ , a_ , a_ : Any = get_tfds(
        train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=__A , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
    a_ : Dict = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(__A ) , labelaid=__A , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='text-classification' , cache_dir=model_args.cache_dir , )
    with training_args.strategy.scope():
        a_ : Any = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path , from_pt=bool('.bin' in model_args.model_name_or_path ) , config=__A , cache_dir=model_args.cache_dir , )
    def compute_metrics(__A : EvalPrediction ) -> Dict:
        # Accuracy over the argmax class predictions.
        a_ : Union[str, Any] = np.argmax(p.predictions , axis=1 )
        return {"acc": (preds == p.label_ids).mean()}
    # Initialize our Trainer
    a_ : str = TFTrainer(
        model=__A , args=__A , train_dataset=__A , eval_dataset=__A , compute_metrics=__A , )
    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    a_ : int = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        a_ : Optional[int] = trainer.evaluate()
        a_ : int = os.path.join(training_args.output_dir , 'eval_results.txt' )
        with open(__A , 'w' ) as writer:
            logger.info('***** Eval results *****' )
            for key, value in result.items():
                logger.info(F""" {key} = {value}""" )
                writer.write(F"""{key} = {value}\n""" )
        results.update(__A )
    return results
# Script entry point: run the fine-tuning/evaluation pipeline implemented by
# `main()` above (defined earlier in this file).
if __name__ == "__main__":
    main()
| 570 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_mbart import MBartTokenizer
else:
    # Without sentencepiece the slow tokenizer cannot be used; expose None so the
    # fast tokenizer's `slow_tokenizer_class` attribute below still resolves.
    MBartTokenizer = None

logger = logging.get_logger(__name__)

# File names expected inside a pretrained-tokenizer directory.
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}

# NOTE: in the mangled original every constant below was bound to the same name
# `_a`, so each assignment clobbered the previous one while the tokenizer class
# references the canonical names (`VOCAB_FILES_NAMES`, `FAIRSEQ_LANGUAGE_CODES`,
# `logger`, ...). The canonical names are restored here.
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/mbart-large-en-ro': (
            'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
        ),
        'facebook/mbart-large-cc25': (
            'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
        ),
    },
    'tokenizer_file': {
        'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
        'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
    },
}

# Maximum input lengths for the known checkpoints.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/mbart-large-en-ro': 1_024,
    'facebook/mbart-large-cc25': 1_024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
# fmt: on
class a_(PreTrainedTokenizerFast):
    """
    Construct a "fast" MBART tokenizer (backed by HuggingFace's *tokenizers* library).

    Source documents are tokenized as ``<tokens> <eos> <src_lang_code>`` and target
    documents as ``<tokens> <eos> <tgt_lang_code>`` (the language code is appended,
    not prefixed).

    NOTE(review): the mangled original declared the undefined base class ``a``,
    gave every parameter of ``__init__`` the same name (a SyntaxError) and named
    every method ``lowerCAmelCase``; the canonical interface is restored here.
    """

    # Class attributes consumed by `PreTrainedTokenizerFast.from_pretrained`.
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        # Without the sentencepiece model file the slow vocabulary cannot be saved.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({'additional_special_tokens': _additional_special_tokens})

        # Map each FAIRSEQ language code to its token id for quick lookup.
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else 'en_XX'
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        """The currently active source-language code (e.g. ``"en_XX"``)."""
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        # Keep the post-processor's special tokens in sync with the new language.
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Add the MBART prefix/suffix special tokens around one or two sequences."""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency.
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return an all-zero token-type mask; MBART does not use token types."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors: Optional[str], src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by translation pipelines to prepare inputs for generate()."""
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        # Force generation to start with the target-language code.
        inputs['forced_bos_token_id'] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        """Tokenize source and (optionally) target texts with the right language codes."""
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset special tokens to source-language mode: no prefix, suffix = [eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        # NOTE(review): the mangled original dropped this assignment target and
        # bound the processor to a throwaway local; upstream assigns it to the
        # backing tokenizer's post-processor.
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str,
            pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset special tokens to target-language mode: no prefix, suffix = [eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str,
            pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece model file into `save_directory`."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.'
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        # Avoid copying the file onto itself.
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 84 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class a_(PipelineTool):
    """Tool that produces an English caption for an input image using BLIP.

    NOTE(review): the mangled original subclassed the undefined name ``a``
    (``PipelineTool`` is what this file imports), bound every class attribute to
    the single name ``A__`` (each assignment clobbering the previous), and named
    all three pipeline hooks ``lowerCAmelCase``. The attribute and method names
    below are the ones the ``PipelineTool`` contract actually consumes.
    """

    default_checkpoint = 'Salesforce/blip-image-captioning-base'
    description = (
        'This is a tool that generates a description of an image. It takes an input named `image` which should be the '
        'image to caption, and returns a text that contains the description in English.'
    )
    name = 'image_captioner'
    model_class = AutoModelForVisionaSeq

    inputs = ['image']
    outputs = ['text']

    def __init__(self, *args, **kwargs):
        """Verify the vision extra is installed before the generic tool setup."""
        requires_backends(self, ['vision'])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        """Preprocess the PIL image into model-ready pixel tensors."""
        return self.pre_processor(images=image, return_tensors='pt')

    def forward(self, inputs):
        """Run autoregressive caption generation on the encoded inputs."""
        return self.model.generate(**inputs)

    def decode(self, outputs):
        """Decode generated ids into a clean caption string.

        skip_special_tokens=True: the mangled original passed the undefined
        placeholder ``UpperCAmelCase__`` here.
        """
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
| 84 | 1 |
import os
import sys
import unittest
# Resolve the repository root (three directory levels above this test file) so
# that the repo's `utils/` helpers are importable regardless of the cwd.
# NOTE(review): the mangled original assigned this to `SCREAMING_SNAKE_CASE`
# while the next lines read `git_repo_path` — a NameError. The grounded name is
# restored.
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
# NOTE(review): the mangled original bound this to a throwaway module variable;
# per the comment above it is meant to patch the path constant inside
# `check_dummies` (named `PATH_TO_DIFFUSERS` upstream) — confirm against utils/check_dummies.py.
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")
class __lowercase ( unittest.TestCase ):
    """Tests for the `check_dummies` repo utility: backend detection, __init__
    parsing and dummy-object/file generation.

    NOTE(review): the mangled original named all four methods `lowerCAmelCase_`
    (each shadowing the previous, none collected as a test), bound every local to
    `A_`, asserted against the undefined placeholder `a__`, stripped the blank
    lines inside the expected multi-line strings, and had a dataset artifact
    (`| 141 |`) fused onto the last line. Distinct `test_*` names, grounded local
    names and the upstream expected strings are restored here.
    """

    def test_find_backend(self):
        """`find_backend` extracts the backend name from an availability guard."""
        simple_backend = find_backend(''' if not is_torch_available():''' )
        self.assertEqual(simple_backend , '''torch''' )
        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")
        double_backend = find_backend(''' if not (is_torch_available() and is_transformers_available()):''' )
        self.assertEqual(double_backend , '''torch_and_transformers''' )
        # double_backend_with_underscore = find_backend(
        #     " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        triple_backend = find_backend(
            ''' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):''' )
        self.assertEqual(triple_backend , '''torch_and_transformers_and_onnx''' )

    def test_read_init(self):
        """`read_init` maps each backend to the objects guarded by it."""
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn('''torch''' , objects )
        self.assertIn('''torch_and_transformers''' , objects )
        self.assertIn('''flax_and_transformers''' , objects )
        self.assertIn('''torch_and_transformers_and_onnx''' , objects )
        # Likewise, we can't assert on the exact content of a key
        self.assertIn('''UNet2DModel''' , objects['''torch'''] )
        self.assertIn('''FlaxUNet2DConditionModel''' , objects['''flax'''] )
        self.assertIn('''StableDiffusionPipeline''' , objects['''torch_and_transformers'''] )
        self.assertIn('''FlaxStableDiffusionPipeline''' , objects['''flax_and_transformers'''] )
        self.assertIn('''LMSDiscreteScheduler''' , objects['''torch_and_scipy'''] )
        self.assertIn('''OnnxStableDiffusionPipeline''' , objects['''torch_and_transformers_and_onnx'''] )

    def test_create_dummy_object(self):
        """`create_dummy_object` renders a constant, a function and a class stub."""
        dummy_constant = create_dummy_object('''CONSTANT''' , '''\'torch\'''' )
        self.assertEqual(dummy_constant , '''\nCONSTANT = None\n''' )
        dummy_function = create_dummy_object('''function''' , '''\'torch\'''' )
        self.assertEqual(
            dummy_function , '''\ndef function(*args, **kwargs):\n    requires_backends(function, \'torch\')\n''' )
        expected_dummy_class = '''
class FakeClass(metaclass=DummyObject):
    _backends = \'torch\'

    def __init__(self, *args, **kwargs):
        requires_backends(self, \'torch\')

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, \'torch\')

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, \'torch\')
'''
        dummy_class = create_dummy_object('''FakeClass''' , '''\'torch\'''' )
        self.assertEqual(dummy_class , expected_dummy_class )

    def test_create_dummy_files(self):
        """`create_dummy_files` renders a complete dummy module per backend."""
        expected_dummy_pytorch_file = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
'''
        dummy_files = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
        self.assertEqual(dummy_files['''torch'''] , expected_dummy_pytorch_file )
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
# NOTE(review): machine-mangled test class for `ChineseCLIPProcessor`. Every
# method is named `lowerCAmelCase_` (later defs shadow earlier ones and none is
# collected by the test runner), every local is rebound to `A_`, and many call
# arguments were replaced by the undefined placeholder `a__` — those original
# values (e.g. `padding=True`, isinstance targets) cannot be reconstructed from
# this file alone; compare with the upstream transformers test before use.
# The only code change here is removing a dataset artifact (`| 141 | 1 |`) that
# was fused onto the final line and made the class a SyntaxError.
@require_vision
class __lowercase ( unittest.TestCase ):
    def lowerCAmelCase_ ( self ) -> int:
        '''simple docstring'''
        # setUp-equivalent: writes a BERT vocab and an image-processor config to a tmp dir.
        A_ = tempfile.mkdtemp()
        A_ = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''的''',
            '''价''',
            '''格''',
            '''是''',
            '''15''',
            '''便''',
            '''alex''',
            '''##andra''',
            ''',''',
            '''。''',
            '''-''',
            '''t''',
            '''shirt''',
        ]
        A_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        A_ = {
            '''do_resize''': True,
            '''size''': {'''height''': 2_2_4, '''width''': 2_2_4},
            '''do_center_crop''': True,
            '''crop_size''': {'''height''': 1_8, '''width''': 1_8},
            '''do_normalize''': True,
            '''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
            '''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
            '''do_convert_rgb''': True,
        }
        A_ = os.path.join(self.tmpdirname , a__ )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(a__ , a__ )
    def lowerCAmelCase_ ( self , **a__ ) -> List[Any]:
        '''simple docstring'''
        return BertTokenizer.from_pretrained(self.tmpdirname , **a__ )
    def lowerCAmelCase_ ( self , **a__ ) -> str:
        '''simple docstring'''
        return BertTokenizerFast.from_pretrained(self.tmpdirname , **a__ )
    def lowerCAmelCase_ ( self , **a__ ) -> List[str]:
        '''simple docstring'''
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **a__ )
    def lowerCAmelCase_ ( self ) -> Tuple:
        '''simple docstring'''
        # tearDown-equivalent: drop the tmp dir created above.
        shutil.rmtree(self.tmpdirname )
    def lowerCAmelCase_ ( self ) -> Union[str, Any]:
        '''simple docstring'''
        A_ = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
        A_ = [Image.fromarray(np.moveaxis(a__ , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def lowerCAmelCase_ ( self ) -> List[str]:
        '''simple docstring'''
        A_ = self.get_tokenizer()
        A_ = self.get_rust_tokenizer()
        A_ = self.get_image_processor()
        A_ = ChineseCLIPProcessor(tokenizer=a__ , image_processor=a__ )
        processor_slow.save_pretrained(self.tmpdirname )
        A_ = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=a__ )
        A_ = ChineseCLIPProcessor(tokenizer=a__ , image_processor=a__ )
        processor_fast.save_pretrained(self.tmpdirname )
        A_ = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , a__ )
        self.assertIsInstance(processor_fast.tokenizer , a__ )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , a__ )
        self.assertIsInstance(processor_fast.image_processor , a__ )
    def lowerCAmelCase_ ( self ) -> Dict:
        '''simple docstring'''
        A_ = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        A_ = self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''' )
        A_ = self.get_image_processor(do_normalize=a__ )
        A_ = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=a__ )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , a__ )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , a__ )
    def lowerCAmelCase_ ( self ) -> Optional[Any]:
        '''simple docstring'''
        A_ = self.get_image_processor()
        A_ = self.get_tokenizer()
        A_ = ChineseCLIPProcessor(tokenizer=a__ , image_processor=a__ )
        A_ = self.prepare_image_inputs()
        A_ = image_processor(a__ , return_tensors='''np''' )
        A_ = processor(images=a__ , return_tensors='''np''' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def lowerCAmelCase_ ( self ) -> Optional[Any]:
        '''simple docstring'''
        A_ = self.get_image_processor()
        A_ = self.get_tokenizer()
        A_ = ChineseCLIPProcessor(tokenizer=a__ , image_processor=a__ )
        A_ = '''Alexandra,T-shirt的价格是15便士。'''
        A_ = processor(text=a__ )
        A_ = tokenizer(a__ )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def lowerCAmelCase_ ( self ) -> List[str]:
        '''simple docstring'''
        A_ = self.get_image_processor()
        A_ = self.get_tokenizer()
        A_ = ChineseCLIPProcessor(tokenizer=a__ , image_processor=a__ )
        A_ = '''Alexandra,T-shirt的价格是15便士。'''
        A_ = self.prepare_image_inputs()
        A_ = processor(text=a__ , images=a__ )
        self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
        # test if it raises when no input is passed
        with pytest.raises(a__ ):
            processor()
    def lowerCAmelCase_ ( self ) -> Dict:
        '''simple docstring'''
        A_ = self.get_image_processor()
        A_ = self.get_tokenizer()
        A_ = ChineseCLIPProcessor(tokenizer=a__ , image_processor=a__ )
        A_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        A_ = processor.batch_decode(a__ )
        A_ = tokenizer.batch_decode(a__ )
        self.assertListEqual(a__ , a__ )
    def lowerCAmelCase_ ( self ) -> int:
        '''simple docstring'''
        A_ = self.get_image_processor()
        A_ = self.get_tokenizer()
        A_ = ChineseCLIPProcessor(tokenizer=a__ , image_processor=a__ )
        A_ = '''Alexandra,T-shirt的价格是15便士。'''
        A_ = self.prepare_image_inputs()
        A_ = processor(text=a__ , images=a__ )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
"""simple docstring"""
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 518 |
"""simple docstring"""
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
# NOTE(review): machine-mangled name — conventionally this would be `logger`;
# it is unused anywhere in this chunk.
_A : Optional[Any] = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str) -> None:
    """Convert an `efficient_mlm` RoBERTa-PreLayerNorm checkpoint into the
    transformers `RobertaPreLayerNorm` format and save model + tokenizer.

    NOTE(review): the mangled original was named `__magic_name__` with both
    parameters called `__snake_case` (a SyntaxError) and every local rebound to
    `lowercase` (losing, e.g., the state-dict item assignment). The `__main__`
    guard below calls `convert_roberta_prelayernorm_checkpoint_to_pytorch`,
    grounding the intended public name restored here.

    Args:
        checkpoint_repo: Hub repo id of the official dump,
            e.g. ``'andreasmadsen/efficient_mlm_m0.40'``.
        pytorch_dump_folder_path: Output directory for the converted model.
    """
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwiriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=checkpoint_repo, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # NOTE(review): the mangled original bound both the parser and the parsed
    # args to `_A` while the following lines read `parser` and `args` — a
    # NameError. The grounded names are restored.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--checkpoint-repo""",
        default=None,
        type=str,
        required=True,
        help="""Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    # argparse converts `--checkpoint-repo` to the attribute `checkpoint_repo`.
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 518 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# NOTE(review): machine-mangled LED tokenizer test class. Defects visible from
# this file alone (code left byte-identical, comments only):
#   * every method is named `lowerCamelCase_`, so later definitions shadow
#     earlier ones and none is discovered by unittest/pytest (and the first one,
#     which calls super().setUp(), is clearly meant to BE `setUp`);
#   * locals are uniformly rebound to `UpperCamelCase_`, clobbering each other;
#   * several call arguments reference `_lowerCamelCase`, which in many methods
#     is not a parameter (NameError) — the original values (e.g. `padding=True`,
#     `truncation=True`) cannot be reconstructed from this chunk; compare with
#     the upstream transformers LED tokenization test before use.
@require_tokenizers
class a__ ( A__ , unittest.TestCase ):
    # Harness configuration consumed by TokenizerTesterMixin (names mangled).
    UpperCAmelCase__ = LEDTokenizer
    UpperCAmelCase__ = LEDTokenizerFast
    UpperCAmelCase__ = True
    def lowerCamelCase_ ( self :Any ):
        '''simple docstring'''
        # setUp-equivalent: write a toy BPE vocab + merges file into the tmp dir.
        super().setUp()
        UpperCamelCase_ : str =[
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            '\u0120',
            '\u0120l',
            '\u0120n',
            '\u0120lo',
            '\u0120low',
            'er',
            '\u0120lowest',
            '\u0120newer',
            '\u0120wider',
            '<unk>',
        ]
        # NOTE(review): `_lowerCamelCase` is undefined here — presumably the
        # token list above in the original source.
        UpperCamelCase_ : Any =dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) )
        UpperCamelCase_ : Optional[int] =['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        UpperCamelCase_ : Any ={'unk_token': '<unk>'}
        UpperCamelCase_ : str =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        UpperCamelCase_ : List[str] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(_lowerCamelCase ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(_lowerCamelCase ) )
    def lowerCamelCase_ ( self :Any , **_lowerCamelCase :List[Any] ):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowerCamelCase )
    def lowerCamelCase_ ( self :Dict , **_lowerCamelCase :Union[str, Any] ):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **_lowerCamelCase )
    def lowerCamelCase_ ( self :Optional[int] , _lowerCamelCase :Optional[Any] ):
        '''simple docstring'''
        return "lower newer", "lower newer"
    @cached_property
    def lowerCamelCase_ ( self :Union[str, Any] ):
        '''simple docstring'''
        # Presumably `default_tokenizer` upstream — loads the slow tokenizer.
        return LEDTokenizer.from_pretrained('allenai/led-base-16384' )
    @cached_property
    def lowerCamelCase_ ( self :List[Any] ):
        '''simple docstring'''
        # Presumably `default_tokenizer_fast` upstream — loads the fast tokenizer.
        return LEDTokenizerFast.from_pretrained('allenai/led-base-16384' )
    @require_torch
    def lowerCamelCase_ ( self :Union[str, Any] ):
        '''simple docstring'''
        UpperCamelCase_ : Union[str, Any] =['A long paragraph for summarization.', 'Another paragraph for summarization.']
        UpperCamelCase_ : List[str] =[0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            UpperCamelCase_ : Union[str, Any] =tokenizer(_lowerCamelCase , max_length=len(_lowerCamelCase ) , padding=_lowerCamelCase , return_tensors='pt' )
            self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
            self.assertEqual((2, 9) , batch.input_ids.shape )
            self.assertEqual((2, 9) , batch.attention_mask.shape )
            UpperCamelCase_ : str =batch.input_ids.tolist()[0]
            self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
    @require_torch
    def lowerCamelCase_ ( self :Optional[Any] ):
        '''simple docstring'''
        UpperCamelCase_ : List[str] =['A long paragraph for summarization.', 'Another paragraph for summarization.']
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            UpperCamelCase_ : List[Any] =tokenizer(_lowerCamelCase , padding=_lowerCamelCase , return_tensors='pt' )
            self.assertIn('input_ids' , _lowerCamelCase )
            self.assertIn('attention_mask' , _lowerCamelCase )
            self.assertNotIn('labels' , _lowerCamelCase )
            self.assertNotIn('decoder_attention_mask' , _lowerCamelCase )
    @require_torch
    def lowerCamelCase_ ( self :Optional[Any] ):
        '''simple docstring'''
        UpperCamelCase_ : List[Any] =[
            'Summary of the text.',
            'Another summary.',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            UpperCamelCase_ : Optional[Any] =tokenizer(text_target=_lowerCamelCase , max_length=32 , padding='max_length' , return_tensors='pt' )
            self.assertEqual(32 , targets['input_ids'].shape[1] )
    @require_torch
    def lowerCamelCase_ ( self :Union[str, Any] ):
        '''simple docstring'''
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            UpperCamelCase_ : Tuple =tokenizer(
                ['I am a small frog' * 1_024, 'I am a small frog'] , padding=_lowerCamelCase , truncation=_lowerCamelCase , return_tensors='pt' )
            self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
            self.assertEqual(batch.input_ids.shape , (2, 5_122) )
    @require_torch
    def lowerCamelCase_ ( self :Any ):
        '''simple docstring'''
        UpperCamelCase_ : str =['A long paragraph for summarization.']
        UpperCamelCase_ : List[Any] =[
            'Summary of the text.',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            UpperCamelCase_ : Tuple =tokenizer(_lowerCamelCase , return_tensors='pt' )
            UpperCamelCase_ : Dict =tokenizer(text_target=_lowerCamelCase , return_tensors='pt' )
            UpperCamelCase_ : Optional[Any] =inputs['input_ids']
            UpperCamelCase_ : Any =targets['input_ids']
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
    @require_torch
    def lowerCamelCase_ ( self :str ):
        '''simple docstring'''
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            UpperCamelCase_ : Any =['Summary of the text.', 'Another summary.']
            UpperCamelCase_ : Optional[int] =[[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            UpperCamelCase_ : Dict =tokenizer(_lowerCamelCase , padding=_lowerCamelCase )
            UpperCamelCase_ : Dict =[[0] * len(_lowerCamelCase ) for x in encoded_output['input_ids']]
            UpperCamelCase_ : int =tokenizer.pad(_lowerCamelCase )
            self.assertSequenceEqual(outputs['global_attention_mask'] , _lowerCamelCase )
    def lowerCamelCase_ ( self :Tuple ):
        '''simple docstring'''
        pass
    def lowerCamelCase_ ( self :str ):
        '''simple docstring'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                UpperCamelCase_ : Any =self.rust_tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
                UpperCamelCase_ : Tuple =self.tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
                UpperCamelCase_ : Optional[int] ='A, <mask> AllenNLP sentence.'
                UpperCamelCase_ : Union[str, Any] =tokenizer_r.encode_plus(_lowerCamelCase , add_special_tokens=_lowerCamelCase , return_token_type_ids=_lowerCamelCase )
                UpperCamelCase_ : Any =tokenizer_p.encode_plus(_lowerCamelCase , add_special_tokens=_lowerCamelCase , return_token_type_ids=_lowerCamelCase )
                self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
                self.assertEqual(
                    sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
                UpperCamelCase_ : Dict =tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
                UpperCamelCase_ : Optional[int] =tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
                self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
                self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
                self.assertSequenceEqual(
                    _lowerCamelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
                self.assertSequenceEqual(
                    _lowerCamelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
| 357 |
"""simple docstring"""
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# Maximum model input sizes per checkpoint; read by the tokenizer class below.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'nielsr/canine-s': 2_048,
}

# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1_114_112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
# NOTE: the obfuscated original rebound one name for every constant, clobbering
# all but the last and leaving the dict below referencing undefined names.
PAD = 0
CLS = 0XE_0_0_0
SEP = 0XE_0_0_1
BOS = 0XE_0_0_2
MASK = 0XE_0_0_3
RESERVED = 0XE_0_0_4

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}

# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class a__ ( A__ ):
    """Character-level (CANINE-style) tokenizer.

    Text is split into individual Unicode characters; each character's
    codepoint is its token id, so no vocabulary file is required. Special
    pseudo-characters live in the Unicode Private Use Area.

    Fixes the obfuscated original: ``__init__`` (and several other methods)
    declared every parameter under one duplicated name — a SyntaxError — with
    self-referential defaults, and bound all state to throwaway locals.
    """

    # NOTE(review): attribute name was obfuscated; `max_model_input_sizes` is
    # the name the PreTrainedTokenizer base class reads — confirm.
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(0XE_0_0_0),  # CLS codepoint
        eos_token=chr(0XE_0_0_1),  # SEP codepoint
        sep_token=chr(0XE_0_0_1),
        cls_token=chr(0XE_0_0_0),
        pad_token=chr(0),
        mask_token=chr(0XE_0_0_3),
        add_prefix_space=False,
        model_max_length=2_048,
        **kwargs,
    ):
        """Build the tokenizer; string special tokens are wrapped as AddedToken."""
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , model_max_length=model_max_length , **kwargs , )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints )

    @property
    def vocab_size(self) -> int:
        """One id per Unicode codepoint."""
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize by splitting into individual characters."""
        return list(text )

    def _convert_token_to_id(self, token: str) -> int:
        """A token's id is its Unicode codepoint."""
        try:
            return ord(token )
        except TypeError:
            raise ValueError(f'''invalid token: \'{token}\'''' )

    def _convert_id_to_token(self, index: int) -> str:
        """Map an id back to a character; special codepoints get their names."""
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index )
        except TypeError:
            raise ValueError(f'''invalid id: {index}''' )

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Characters concatenate directly back into the original string."""
        return "".join(tokens )

    def build_inputs_with_special_tokens(self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None) -> List[int]:
        """Format as ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_a + sep
        if token_ids_a_a is not None:
            result += token_ids_a_a + sep
        return result

    def get_special_tokens_mask(
        self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return 1 for special-token positions, 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_a_a , already_has_special_tokens=True )
        result = [1] + ([0] * len(token_ids_a )) + [1]
        if token_ids_a_a is not None:
            result += ([0] * len(token_ids_a_a )) + [1]
        return result

    def create_token_type_ids_from_sequences(self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None) -> List[int]:
        """Segment ids: 0 for the first sequence (incl. CLS/SEP), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = len(cls + token_ids_a + sep ) * [0]
        if token_ids_a_a is not None:
            result += len(token_ids_a_a + sep ) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """Nothing to save: the vocabulary is the Unicode codepoint space."""
        return ()
| 357 | 1 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class __magic_name__ ( unittest.TestCase ):
    """Tests for VisionTextDualEncoderProcessor (BERT tokenizer + ViT image processor).

    Fixes the obfuscated original: fixture paths were bound to throwaway
    locals while ``self.tmpdirname``/``self.vocab_file`` were read, ``A_`` was
    referenced in methods that had no such parameter, ``np.uinta`` was a typo
    for ``np.uint8``, and every method shared one name so unittest discovery
    never ran any test.
    """

    def setUp(self):
        """Write a toy BERT vocab and a ViT image-processor config into a temp dir."""
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest']
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )

        image_processor_map = {
            'do_resize': True,
            'size': {'height': 18, 'width': 18},
            'do_normalize': True,
            'image_mean': [0.5, 0.5, 0.5],
            'image_std': [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
            json.dump(image_processor_map , fp )

    def get_tokenizer(self, **A_):
        """Slow tokenizer built from the temp-dir fixtures."""
        return BertTokenizer.from_pretrained(self.tmpdirname , **A_ )

    def get_image_processor(self, **A_):
        """Image processor built from the temp-dir fixtures."""
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **A_ )

    def tearDown(self):
        shutil.rmtree(self.tmpdirname )

    def prepare_image_inputs(self):
        """Return a list with one random PIL image (moved to channels-last)."""
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        """save_pretrained/from_pretrained round-trips both sub-components."""
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        processor.save_pretrained(self.tmpdirname )
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )

        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor.image_processor , ViTImageProcessor )

    def test_save_load_pretrained_additional_features(self):
        """Extra kwargs to from_pretrained override the saved configuration."""
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=False , padding_value=1.0 )

        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , ViTImageProcessor )

    def test_image_processor(self):
        """The processor delegates image handling to the image processor."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors='np' )
        input_processor = processor(images=image_input , return_tensors='np' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )

    def test_tokenizer(self):
        """The processor delegates text handling to the tokenizer."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = 'lower newer'
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def test_processor(self):
        """Passing both text and images yields the union of both outputs."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )

        # test if it raises when no input is passed
        with self.assertRaises(ValueError ):
            processor()

    def test_tokenizer_decode(self):
        """batch_decode is forwarded to the tokenizer."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )

    def test_model_input_names(self):
        """Output keys match the processor's declared model_input_names."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 272 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
# Fixed the obfuscated original: all three constants were bound to one name
# (clobbering each other) and the tests referenced an undefined `A_`.
RANDOM_BERT = 'hf-internal-testing/tiny-random-bert'
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, 'models--hf-internal-testing--tiny-random-bert')
FULL_COMMIT_HASH = '9b8c223d42b2188cb49d29af482996f9d0f3e5a6'


class __magic_name__ ( unittest.TestCase ):
    """Tests for the hub file-caching helpers (network access required)."""

    def test_cached_file(self):
        """cached_file downloads into the HF cache layout and is idempotent."""
        archive_file = cached_file(RANDOM_BERT , CONFIG_NAME )
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR ) )
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR , subfolder ) ) )
        with open(os.path.join(CACHE_DIR , 'refs' , 'main' ) ) as f:
            main_commit = f.read()
        self.assertEqual(archive_file , os.path.join(CACHE_DIR , 'snapshots' , main_commit , CONFIG_NAME ) )
        self.assertTrue(os.path.isfile(archive_file ) )

        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT , CONFIG_NAME )
        self.assertEqual(new_archive_file , archive_file )

        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT , CONFIG_NAME , revision='9b8c223' )
        self.assertEqual(archive_file , os.path.join(CACHE_DIR , 'snapshots' , FULL_COMMIT_HASH , CONFIG_NAME ) )

    def test_cached_file_errors(self):
        """Invalid repos, revisions and filenames raise informative errors."""
        with self.assertRaisesRegex(EnvironmentError , 'is not a valid model identifier' ):
            _ = cached_file('tiny-random-bert' , CONFIG_NAME )
        with self.assertRaisesRegex(EnvironmentError , 'is not a valid git identifier' ):
            _ = cached_file(RANDOM_BERT , CONFIG_NAME , revision='aaaa' )
        with self.assertRaisesRegex(EnvironmentError , 'does not appear to have a file named' ):
            _ = cached_file(RANDOM_BERT , 'conf' )

    def test_non_existence_is_cached(self):
        """Missing files are cached as .no_exist and connection errors degrade to None."""
        with self.assertRaisesRegex(EnvironmentError , 'does not appear to have a file named' ):
            _ = cached_file(RANDOM_BERT , 'conf' )
        with open(os.path.join(CACHE_DIR , 'refs' , 'main' ) ) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR , '.no_exist' , main_commit , 'conf' ) ) )

        path = cached_file(RANDOM_BERT , 'conf' , _raise_exceptions_for_missing_entries=False )
        self.assertIsNone(path )

        path = cached_file(RANDOM_BERT , 'conf' , local_files_only=True , _raise_exceptions_for_missing_entries=False )
        self.assertIsNone(path )

        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('requests.Session.request' , return_value=response_mock ) as mock_head:
            path = cached_file(RANDOM_BERT , 'conf' , _raise_exceptions_for_connection_errors=False )
            self.assertIsNone(path )
        # This check we did call the fake head request
        mock_head.assert_called()

    def test_has_file(self):
        """has_file distinguishes which framework weights a repo ships."""
        self.assertTrue(has_file('hf-internal-testing/tiny-bert-pt-only' , WEIGHTS_NAME ) )
        self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , TF2_WEIGHTS_NAME ) )
        self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , FLAX_WEIGHTS_NAME ) )

    def test_get_file_from_repo_distant(self):
        """get_file_from_repo returns None for missing files and raises on bad repos."""
        self.assertIsNone(get_file_from_repo('bert-base-cased' , 'ahah.txt' ) )

        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError , 'is not a valid model identifier' ):
            get_file_from_repo('bert-base-case' , CONFIG_NAME )

        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError , 'is not a valid git identifier' ):
            get_file_from_repo('bert-base-cased' , CONFIG_NAME , revision='ahaha' )

        resolved_file = get_file_from_repo('bert-base-cased' , CONFIG_NAME )
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file , 'r' ).read() )
        self.assertEqual(config['hidden_size'] , 768 )

    def test_get_file_from_repo_local(self):
        """Local directories are searched directly."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir ) / 'a.txt'
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir , 'a.txt' ) , str(filename ) )
            self.assertIsNone(get_file_from_repo(tmp_dir , 'b.txt' ) )
| 272 | 1 |
'''simple docstring'''
def hamming(n_element):
    """Return the first ``n_element`` Hamming numbers (2^i * 3^j * 5^k, ascending).

    Renamed to ``hamming`` to match the in-file caller; the obfuscated
    original converted an undefined name and bound every intermediate to a
    throwaway local while the loop read ``n_element``/``hamming_list``/``i``/``j``/``k``.

    :param n_element: how many Hamming numbers to generate (must be >= 1).
    :raises ValueError: if ``n_element`` is less than 1.
    """
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("a should be a positive number")
        raise my_error

    hamming_list = [1]
    i, j, k = (0, 0, 0)  # candidate indices for the 2x, 3x and 5x multipliers
    index = 1
    while index < n_element:
        # Advance each pointer past candidates no larger than the current maximum.
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
        index += 1
    return hamming_list
if __name__ == "__main__":
    # Fixed the obfuscated original, which bound the input and the result to
    # throwaway names while reading `n` and `hamming_numbers`.
    n = input("""Enter the last number (nth term) of the Hamming Number Series: """)
    print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
    hamming_numbers = hamming(int(n))
    print("""-----------------------------------------------------""")
    print(F"The list with nth numbers is: {hamming_numbers}")
    print("""-----------------------------------------------------""")
| 331 |
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
snake_case__ : Dict = TypeVar("""T""")
class _a ( Generic[T] ):
"""simple docstring"""
A_ = 42 # Cache store of keys
A_ = 42 # References of the keys in cache
A_ = 10 # Maximum capacity of cache
def __init__( self , _UpperCAmelCase ) -> None:
UpperCamelCase_ = deque()
UpperCamelCase_ = set()
if not n:
UpperCamelCase_ = sys.maxsize
elif n < 0:
raise ValueError('n should be an integer greater than 0.' )
else:
UpperCamelCase_ = n
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> None:
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
UpperCamelCase_ = self.dq_store.pop()
self.key_reference.remove(_UpperCAmelCase )
else:
self.dq_store.remove(_UpperCAmelCase )
self.dq_store.appendleft(_UpperCAmelCase )
self.key_reference.add(_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> None:
for k in self.dq_store:
print(_UpperCAmelCase )
def __repr__( self ) -> str:
return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"""
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Fixed the obfuscated original, which constructed an undefined `LRUCache`
    # and bound the instance to a throwaway name while calling `lru_cache`.
    lru_cache: _a[str | int] = _a(4)
    lru_cache.refer("""A""")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("""A""")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()

    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 23 | 0 |
"""simple docstring"""
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    """Query the GitHub API for self-hosted runner status and report offline ones.

    Renamed to match the call in the ``__main__`` block; the obfuscated
    original declared both parameters under one duplicated name (a
    SyntaxError) and bound every intermediate to a throwaway local.

    :param target_runners: runner names to check.
    :param token: GitHub token with ``actions:read`` permission.
    :raises ValueError: if any targeted runner is offline.
    """
    offline_runners = []
    cmd = (
        f"""curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""""
        ''' https://api.github.com/repos/huggingface/transformers/actions/runners'''
    )
    output = subprocess.run(cmd , shell=True , stdout=subprocess.PIPE )
    o = output.stdout.decode('''utf-8''' )
    status = json.loads(o )

    runners = status['''runners''']
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner )

    # save the result so we can report them on Slack
    with open('''offline_runners.txt''' , '''w''' ) as fp:
        fp.write(json.dumps(offline_runners ) )

    if len(offline_runners ) > 0:
        failed = '''\n'''.join([x['''name'''] for x in offline_runners] )
        raise ValueError(f"""The following runners are offline:\n{failed}""" )
if __name__ == "__main__":
    # Fixed the obfuscated original: the converter must be named `list_str`
    # (it is passed as `type=list_str` below) and parse_args must bind `args`.
    def list_str(values):
        """Parse a comma-separated CLI value into a list of strings."""
        return values.split(''',''' )

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--target_runners""",
        default=None,
        type=list_str,
        required=True,
        help="""Comma-separated list of runners to check status.""",
    )

    parser.add_argument(
        """--token""", default=None, type=str, required=True, help="""A token that has actions:read permission."""
    )
    args = parser.parse_args()

    get_runner_status(args.target_runners, args.token)
| 710 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# NOTE(review): constant name restored from the HF convention for config
# archive maps — confirm against any in-repo references.
SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class _UpperCAmelCase ( PretrainedConfig ):
    """Configuration class for SEW-D models.

    Fixes the obfuscated original: the base class name was undefined
    (``PretrainedConfig`` is what this file imports), every ``__init__``
    parameter was declared under the single duplicated name ``a_`` (a
    SyntaxError), and all attributes were bound to throwaway locals while
    ``self.conv_dim`` etc. were read later.
    """

    model_type = 'sew-d'

    def __init__(
        self,
        vocab_size=32,
        hidden_size=7_68,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=30_72,
        squeeze_factor=2,
        max_position_embeddings=5_12,
        position_buckets=2_56,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1E-7,
        feature_layer_norm_eps=1E-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 1_28, 1_28, 1_28, 1_28, 2_56, 2_56, 2_56, 2_56, 5_12, 5_12, 5_12, 5_12),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=1_28,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=2_56,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type )
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect.'''
                '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
                f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
                f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        """Total downsampling factor of the convolutional feature extractor."""
        return functools.reduce(operator.mul , self.conv_stride , 1 )
| 229 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import structure for the chinese_clip sub-package. Fixes the
# obfuscated original, which bound the dict and its optional additions to a
# throwaway name while `_import_structure` was read, and discarded the
# _LazyModule instead of installing it in sys.modules.
_import_structure = {
    'configuration_chinese_clip': [
        'CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'ChineseCLIPConfig',
        'ChineseCLIPOnnxConfig',
        'ChineseCLIPTextConfig',
        'ChineseCLIPVisionConfig',
    ],
    'processing_chinese_clip': ['ChineseCLIPProcessor'],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_chinese_clip'] = ['ChineseCLIPFeatureExtractor']
    _import_structure['image_processing_chinese_clip'] = ['ChineseCLIPImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_chinese_clip'] = [
        'CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ChineseCLIPModel',
        'ChineseCLIPPreTrainedModel',
        'ChineseCLIPTextModel',
        'ChineseCLIPVisionModel',
    ]

if TYPE_CHECKING:
    from .configuration_chinese_clip import (
        CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ChineseCLIPConfig,
        ChineseCLIPOnnxConfig,
        ChineseCLIPTextConfig,
        ChineseCLIPVisionConfig,
    )
    from .processing_chinese_clip import ChineseCLIPProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_chinese_clip import (
            CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ChineseCLIPModel,
            ChineseCLIPPreTrainedModel,
            ChineseCLIPTextModel,
            ChineseCLIPVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 556 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
# Must be named `in_colab`: the menu class below reads it to pick an input
# mode. The obfuscated original bound the flag to a throwaway name.
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class _A :
    """Interactive terminal bullet menu with arrow/number-key navigation.

    Fixes the obfuscated original: instance state was bound to throwaway
    locals while ``self.position``/``self.choices``/``self.arrow_char`` were
    read, and every method shared one name even though the class itself calls
    ``write_choice``/``print_choice``/``move_direction``/``run``.
    """

    def __init__( self , prompt = None , choices = None ):
        # Avoid the mutable-default-argument pitfall of the original `= []`.
        self.position = 0
        self.choices = choices if choices is not None else []
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = """*"""
        else:
            self.arrow_char = """➔ """

    def write_choice( self , index , end_char = "" ):
        """Write choice `index`, highlighted in green where color is supported."""
        if sys.platform != "win32":
            writeColor(self.choices[index] , 32 , end_char )
        else:
            forceWrite(self.choices[index] , end_char )

    def print_choice( self , index ):
        """Print one row, with the arrow marker on the current position."""
        if index == self.position:
            forceWrite(F" {self.arrow_char} " )
            self.write_choice(index )
        else:
            forceWrite(F"    {self.choices[index]}" )
        reset_cursor()

    def move_direction( self , direction , num_spaces = 1 ):
        """Move the cursor up or down by `num_spaces`, redrawing both rows."""
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices ):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position )
        move_cursor(num_spaces , direction.name )
        self.print_choice(self.position )

    @input.mark(KEYMAP["""up"""] )
    def move_up( self ):
        self.move_direction(Direction.UP )

    @input.mark(KEYMAP["""down"""] )
    def move_down( self ):
        self.move_direction(Direction.DOWN )

    @input.mark(KEYMAP["""newline"""] )
    def select( self ):
        move_cursor(len(self.choices ) - self.position , """DOWN""" )
        return self.position

    @input.mark(KEYMAP["""interrupt"""] )
    def interrupt( self ):
        move_cursor(len(self.choices ) - self.position , """DOWN""" )
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number )] for number in range(10 )] )
    def select_row( self ):
        """Jump directly to the row whose digit key was pressed."""
        index = int(chr(self.current_selection ) )
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices ):
            if self.position > index:
                self.move_direction(Direction.UP , -movement )
            elif self.position < index:
                self.move_direction(Direction.DOWN , movement )
            else:
                return
        else:
            return

    def run( self , default_choice = 0 ):
        """Render the menu and block until a choice is made; return its index."""
        if self.prompt:
            linebreak()
            forceWrite(self.prompt , """\n""" )
            if in_colab:
                forceWrite("""Please input a choice index (starting from 0), and press enter""" , """\n""" )
            else:
                forceWrite("""Please select a choice using the arrow or number keys, and selecting with enter""" , """\n""" )
        self.position = default_choice
        for i in range(len(self.choices ) ):
            self.print_choice(i )
            forceWrite("""\n""" )
        move_cursor(len(self.choices ) - self.position , """UP""" )
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input() )
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    # Erase the rendered menu before returning.
                    for _ in range(len(self.choices ) + 1 ):
                        move_cursor(1 , """UP""" )
                        clear_line()
                    self.write_choice(choice , """\n""" )
                    return choice
'''simple docstring'''
def longest_common_subsequence(x: str, y: str):
    """Return ``(length, subsequence)`` of the longest common subsequence of x and y.

    Renamed to match the in-file caller; the obfuscated original declared both
    parameters under one duplicated name (a SyntaxError) and bound the DP
    table and counters to throwaway locals while reading ``x``/``y``/``l``/``seq``.
    """
    assert x is not None
    assert y is not None

    m = len(x)
    n = len(y)

    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741

    for i in range(1 , m + 1):
        for j in range(1 , n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j] , l[i][j - 1] , l[i - 1][j - 1] + match)

    # Walk the table backwards to reconstruct one optimal subsequence.
    seq = ''
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0

        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1

    return l[m][n], seq
if __name__ == "__main__":
snake_case_ = """AGGTAB"""
snake_case_ = """GXTXAYB"""
snake_case_ = 4
snake_case_ = """GTAB"""
snake_case_ , snake_case_ = longest_common_subsequence(a, b)
print("""len =""", ln, """, sub-sequence =""", subseq)
import doctest
doctest.testmod()
| 537 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _lowercase ( metaclass=a ):
_UpperCamelCase = ["""onnx"""]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''onnx'''] )
@classmethod
def snake_case ( cls , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(cls , ['''onnx'''] )
@classmethod
def snake_case ( cls , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(cls , ['''onnx'''] )
| 537 | 1 |
'''simple docstring'''
import json
import sys
def lowerCamelCase__(json_file, md_file) -> None:
    """Render benchmark results from ``json_file`` into a markdown table at
    ``md_file``.

    The obfuscated signature repeated one parameter name (a SyntaxError) and
    every local binding was collapsed, leaving ``results``/``title``/... undefined.
    """
    with open(json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]
    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")
        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)
            # non-numeric entries are rendered as the literal string "None"
            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"
            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]
    output_md.append("</details>")

    with open(md_file, "w", encoding="utf-8") as f:
        f.write("\n".join(output_md))
if __name__ == "__main__":
    # CLI usage: python <script> <input_json_file> <output_md_file>
    # (the obfuscated copy assigned both argv entries to one name and called
    # an undefined ``format_json_to_md``)
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
    lowerCamelCase__(input_json_file, output_md_file)
| 517 |
"""simple docstring"""
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_lowerCAmelCase = "\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n"
_lowerCAmelCase = "\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.\n"
_lowerCAmelCase = r"\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting \"1/2\" to \"\\frac{1}{2}\")\n\nExamples:\n >>> metric = datasets.load_metric(\"competition_math\")\n >>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])\n >>> print(results)\n {'accuracy': 1.0}\n"
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class SCREAMING_SNAKE_CASE(datasets.Metric):
    """Exact-match accuracy on the MATH dataset after LaTeX canonicalization."""

    def _info(self):
        # ``datasets.Metric`` dispatches to ``_info``; the obfuscated copy gave
        # both methods one shared name so the second shadowed the first.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        """Return ``{"accuracy": ...}`` over canonicalized prediction/reference pairs."""
        n_correct = 0.0
        for pred, ref in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(pred, ref) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
| 180 | 0 |
"""simple docstring"""
import random
def A_(vertices_number, probability, directed=False) -> dict:
    """Generate a random graph on ``vertices_number`` vertices as an adjacency dict.

    Each unordered pair (i, j) gets an edge with the given ``probability``;
    for an undirected graph the edge is mirrored. The obfuscated signature
    repeated one parameter name three times (a SyntaxError) and the body read
    names that no longer existed.
    """
    graph = {i: [] for i in range(vertices_number)}

    # probability >= 1 always yields the complete graph (inlined here so the
    # function does not depend on a shadowed sibling helper)
    if probability >= 1:
        return {
            i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
        }

    # probability <= 0 yields a graph with no edges
    if probability <= 0:
        return graph

    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # undirected: mirror the edge
                    graph[j].append(i)
    return graph
def A_(UpperCAmelCase__) -> dict:
    """Return a complete undirected graph on ``UpperCAmelCase__`` vertices.

    The obfuscated body read an undefined ``a__`` instead of the parameter.
    """
    return {
        i: [j for j in range(UpperCAmelCase__) if i != j] for i in range(UpperCAmelCase__)
    }
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 714 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
# ``Generic[T]`` below requires the name ``T``; bind it properly while keeping
# the obfuscated alias for any external reference.
T = SCREAMING_SNAKE_CASE__ = TypeVar("T")
class A_(Generic[T]):
    """Iterative segment tree over an arbitrary associative combine function.

    Leaves live in ``st[N:]``; internal node ``p`` combines children ``2p`` and
    ``2p+1``. The obfuscated copy dropped every ``self.`` target and gave all
    three methods one shared name, although ``__init__`` calls ``self.build()``
    and the demo below calls ``.update``/``.query`` — names restored accordingly.
    """

    def __init__(self, arr, fnc) -> None:
        # Slot 0 and the first N entries are None placeholders.
        any_type: Any | T = None
        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        """Fill every internal node from its two children."""
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v) -> None:
        """Set leaf ``p`` to ``v`` and refresh all of its ancestors."""
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int):  # noqa: E741
        """Combine the leaves in the inclusive index range [l, r]."""
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
if __name__ == "__main__":
    from functools import reduce

    # The obfuscated copy referenced an undefined ``SegmentTree``; the class
    # defined in this module is ``A_``.
    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }
    min_segment_tree = A_(test_array, min)
    max_segment_tree = A_(test_array, max)
    sum_segment_tree = A_(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        """Check every inclusive range [i, j] against a brute-force reduce."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)

    test_all_segments()
| 509 | 0 |
'''simple docstring'''
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration

# The obfuscated copy bound all seven values to one clobbering name; the skip
# wrappers below read exactly these identifiers.
REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None

UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"

REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None
def A_(SCREAMING_SNAKE_CASE_) -> "Any":
    """Wrap a test method so it is skipped when the metric needs fairseq.

    The obfuscated inner parameter shadowed the wrapped callable, leaving
    ``metric_name``/``test_case`` undefined.
    """
    @wraps(SCREAMING_SNAKE_CASE_)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            SCREAMING_SNAKE_CASE_(self, metric_name)

    return wrapper
def A_(SCREAMING_SNAKE_CASE_) -> "Any":
    """Wrap a test method so it is skipped when the metric needs transformers."""
    @wraps(SCREAMING_SNAKE_CASE_)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            SCREAMING_SNAKE_CASE_(self, metric_name)

    return wrapper
def A_(SCREAMING_SNAKE_CASE_) -> "Any":
    """Wrap a test method so it is skipped for metrics unsupported on Windows."""
    @wraps(SCREAMING_SNAKE_CASE_)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            SCREAMING_SNAKE_CASE_(self, metric_name)

    return wrapper
def A_() -> "list[dict]":
    """List metric directory names under ./metrics as parameterized test cases.

    ``gleu`` is excluded because it is unfinished. The obfuscated copy dropped
    the ``metrics`` binding.
    """
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
    __a , __a , __a )
@local
class _a ( parameterized.TestCase ):
    """Doctest-driven smoke test for every local metric script under ./metrics.

    NOTE(review): the decorator arguments are broken in this obfuscated copy —
    ``get_local_metric_names`` and ``__a`` are not bound at module scope
    (upstream passed the three skip-wrapper functions defined above). Restore
    from the upstream test module; the pieces cannot be fixed independently.
    """
    # NOTE(review): the second assignment clobbers the first; upstream kept two
    # distinct attributes here (the intensive-calls patcher registry dict read
    # by the contextmanager below, and a default metric name).
    A_ = {}
    A_ = None
    @pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
    @pytest.mark.filterwarnings("""ignore:load_metric is deprecated:FutureWarning""" )
    def lowerCamelCase__ ( self : Dict , lowercase_ : Any ):
        """Import one metric module, sanity-check ``_compute``, run its doctests."""
        # NOTE(review): each ``lowercase_ = ...`` below discards a distinct
        # upstream binding (doctest placeholder, metric_module, metric,
        # parameters, results); the later reads of those names are undefined
        # as written.
        lowercase_ = """[...]"""
        lowercase_ = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("""metrics""" , lowercase_ ) ).module_path )
        lowercase_ = datasets.load.import_main_class(metric_module.__name__ , dataset=lowercase_ )
        # check parameters
        lowercase_ = inspect.signature(metric._compute ).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
        # run doctest
        with self.patch_intensive_calls(lowercase_ , metric_module.__name__ ):
            with self.use_local_metrics():
                try:
                    lowercase_ = doctest.testmod(lowercase_ , verbose=lowercase_ , raise_on_error=lowercase_ )
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1] # raise the exception that doctest caught
        self.assertEqual(results.failed , 0 )
        self.assertGreater(results.attempted , 1 )
    @slow
    def lowerCamelCase__ ( self : Dict , lowercase_ : str ):
        """Slow variant: run the doctests without patching intensive calls."""
        lowercase_ = """[...]"""
        lowercase_ = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("""metrics""" , lowercase_ ) ).module_path )
        # run doctest
        with self.use_local_metrics():
            lowercase_ = doctest.testmod(lowercase_ , verbose=lowercase_ , raise_on_error=lowercase_ )
        self.assertEqual(results.failed , 0 )
        self.assertGreater(results.attempted , 1 )
    @contextmanager
    def lowerCamelCase__ ( self : int , lowercase_ : Any , lowercase_ : List[Any] ):
        """Apply the registered intensive-calls patcher for a metric, if any.

        NOTE(review): the signature repeats ``lowercase_`` — a SyntaxError.
        """
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](lowercase_ ):
                yield
        else:
            yield
    @contextmanager
    def lowerCamelCase__ ( self : Optional[int] ):
        """Redirect ``datasets.load_metric`` to the local ./metrics scripts."""
        def load_local_metric(lowercase_ : Dict , *lowercase_ : Dict , **lowercase_ : Any ):
            return load_metric(os.path.join("""metrics""" , lowercase_ ) , *lowercase_ , **lowercase_ )
        with patch("""datasets.load_metric""" ) as mock_load_metric:
            # NOTE(review): upstream set ``mock_load_metric.side_effect = load_local_metric``;
            # this bare assignment patches nothing.
            lowercase_ = load_local_metric
            yield
    @classmethod
    def lowerCamelCase__ ( cls : Optional[int] , lowercase_ : Dict ):
        """Decorator factory: register an intensive-calls patcher for one metric."""
        def wrapper(lowercase_ : int ):
            # NOTE(review): upstream wrapped the patcher in ``contextmanager`` and
            # stored it in ``cls.INTENSIVE_CALLS_PATCHER[metric_name]``; both
            # assignment targets were lost, and ``patcher`` is undefined here.
            lowercase_ = contextmanager(lowercase_ )
            lowercase_ = patcher
            return patcher
        return wrapper
# NOTE(review): ``LocalMetricTest`` and ``register_intensive_calls_patcher`` do
# not exist under those names in this obfuscated copy (class ``_a`` above,
# methods renamed), ``tensorflow.compat.va`` looks like a mangled
# ``tensorflow.compat.v1``, the mock base class ``__a`` (upstream:
# ``Predictor``), ``input_dict`` and ``MockedPredictor`` are all unbound, and
# the final assignment lost its ``mock_create_predictor.return_value`` target.
# Restore this patcher from upstream as one change.
@LocalMetricTest.register_intensive_calls_patcher("""bleurt""" )
def A_ ( SCREAMING_SNAKE_CASE_ ) ->List[str]:
    """Patch bleurt's predictor so no TF checkpoint is downloaded in tests."""
    import tensorflow.compat.va as tf
    from bleurt.score import Predictor
    tf.flags.DEFINE_string("""sv""" , """""" , """""" ) # handle pytest cli flags
    class _a ( __a ):
        """Stub predictor returning fixed BLEURT scores."""
        def lowerCamelCase__ ( self : Dict , lowercase_ : Union[str, Any] ):
            assert len(input_dict["""input_ids"""] ) == 2
            return np.array([1.0_3, 1.0_4] )
    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("""bleurt.score._create_predictor""" ) as mock_create_predictor:
        lowercase_ = MockedPredictor()
        yield
# NOTE(review): same systemic breakage as the bleurt patcher above — the
# decorator target does not exist, the inner ``def`` repeats one parameter
# name four times (a SyntaxError), and the final assignment lost its
# ``mock_bert_cos_score_idf.side_effect`` target.
@LocalMetricTest.register_intensive_calls_patcher("""bertscore""" )
def A_ ( SCREAMING_SNAKE_CASE_ ) ->Optional[int]:
    """Patch bert_score so no BERT model is downloaded or run in tests."""
    import torch
    def bert_cos_score_idf(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(SCREAMING_SNAKE_CASE_ ) )
    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("""bert_score.scorer.get_model""" ), patch(
        """bert_score.scorer.bert_cos_score_idf""" ) as mock_bert_cos_score_idf:
        lowercase_ = bert_cos_score_idf
        yield
# NOTE(review): same systemic breakage as the patchers above — undefined
# decorator target, ``scores`` and ``Model`` unbound inside the stub, and the
# two final assignments lost their ``mock_download_model.return_value`` /
# ``mock_load_from_checkpoint.return_value`` targets.
@LocalMetricTest.register_intensive_calls_patcher("""comet""" )
def A_ ( SCREAMING_SNAKE_CASE_ ) ->Union[str, Any]:
    """Patch comet's model download/load with a fixed-score stub."""
    def load_from_checkpoint(SCREAMING_SNAKE_CASE_ ):
        class _a :
            """Stub comet model returning fixed segment scores."""
            def lowerCamelCase__ ( self : Any , lowercase_ : Optional[int] , *lowercase_ : str , **lowercase_ : Dict ):
                assert len(lowercase_ ) == 2
                lowercase_ = [0.1_9, 0.9_2]
                return scores, sum(lowercase_ ) / len(lowercase_ )
        return Model()
    # mock load_from_checkpoint which is supposed to do download a bert model
    # mock load_from_checkpoint which is supposed to do download a bert model
    with patch("""comet.download_model""" ) as mock_download_model:
        lowercase_ = None
        with patch("""comet.load_from_checkpoint""" ) as mock_load_from_checkpoint:
            lowercase_ = load_from_checkpoint
            yield
def A_() -> None:
    """seqeval must reject an unknown tagging scheme with a descriptive error.

    The obfuscated copy lost the exception type, the scheme string, and the
    message variable (it referenced an undefined ``SCREAMING_SNAKE_CASE_``).
    """
    metric = load_metric(os.path.join("metrics", "seqeval"))
    wrong_scheme = "ERROR"
    error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): the second assignment clobbers the first — upstream bound the
# logger and the pretrained-config archive map to two distinct names. Confirm
# nothing relies on ``__snake_case`` before renaming either binding.
__snake_case = logging.get_logger(__name__)

__snake_case = {
    """uw-madison/mra-base-512-4""": """https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json""",
}
class _a(PretrainedConfig):
    """Configuration for MRA models (e.g. ``uw-madison/mra-base-512-4``).

    The obfuscated copy subclassed an undefined ``__a`` (``PretrainedConfig``
    is imported at the top of the file), repeated ``lowercase_`` for every
    parameter (a SyntaxError), and dropped every ``self.`` target.
    """

    # ``PretrainedConfig`` machinery keys off this attribute.
    model_type = "mra"

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
| 451 | 1 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# (the obfuscated copy assigned the path to a throwaway name and then read an
# undefined ``git_repo_path``; the alias is kept for backward compatibility)
UpperCamelCase__ = git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def lowerCamelCase ( _snake_case ):
    """pytest hook: register transformers' shared CLI options (e.g. --make-reports).

    ``_snake_case`` is pytest's option parser; the import is deferred so the
    hook only needs transformers when pytest actually runs.
    """
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(_snake_case )
def lowerCamelCase(_snake_case):
    """pytest hook: emit transformers' report files when --make-reports is set.

    ``_snake_case`` is pytest's terminalreporter plugin; the obfuscated body
    read an undefined ``terminalreporter`` and passed itself as the report id.
    """
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = _snake_case.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(_snake_case, id=make_reports)
| 254 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class a:
    """Builds tiny ConvNext configs and inputs for the unit tests below.

    The obfuscated copy dropped every ``self.`` target in ``__init__`` and gave
    all six methods one shared name; method names restored from the call sites
    in the test class (``prepare_config_and_inputs`` etc.).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class a(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Standard mixin-driven test suite for ConvNext models.

    The obfuscated copy subclassed an undefined ``lowercase`` twice — restored
    to the two mixins imported at the top of the file — and collapsed all
    class attributes and locals onto throwaway names.
    """

    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )

    # NOTE(review): these five flags were all bound to one obfuscated name;
    # names/order restored from the upstream ConvNext test — confirm upstream.
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason='ConvNext does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='ConvNext does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason='ConvNext does not use feedforward chunking')
    def test_feed_forward_chunking(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def lowerCamelCase():
    """Load the standard COCO cats fixture image used by the integration test.

    The obfuscated copy bound the opened image to a throwaway name and then
    returned an undefined ``image``.
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class a(unittest.TestCase):
    """Slow integration test: one real forward pass through convnext-tiny.

    Both obfuscated methods shared one name although the second reads
    ``self.default_image_processor``; names restored accordingly.
    """

    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained('facebook/convnext-tiny-224') if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained('facebook/convnext-tiny-224').to(torch_device)

        image_processor = self.default_image_processor
        # the image-loading helper defined just above this class
        image = lowerCamelCase()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
@require_torch
class a(unittest.TestCase, BackboneTesterMixin):
    """Backbone-specific test suite for ConvNext.

    The obfuscated copy subclassed an undefined ``lowercase`` —
    ``BackboneTesterMixin`` is imported at the top of the file and reads the
    three class attributes restored below, plus ``self.model_tester``.
    """

    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig
    has_attentions = False

    def setUp(self):
        # unittest calls ``setUp``; the obfuscated name prevented that and the
        # tester instance was never bound to ``self``.
        self.model_tester = ConvNextModelTester(self)
| 254 | 1 |
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
# Hub CI credentials/endpoints. The obfuscated original bound all seven values
# to one clobbering name; the fixtures below read ``CI_HUB_USER``,
# ``CI_HUB_USER_TOKEN`` and the endpoint-derived templates.
CI_HUB_USER = '__DUMMY_TRANSFORMERS_USER__'
CI_HUB_USER_FULL_NAME = 'Dummy User'
CI_HUB_USER_TOKEN = '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'
CI_HUB_ENDPOINT = 'https://hub-ci.huggingface.co'
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + '/datasets/{repo_id}/resolve/{revision}/{path}'
# NOTE(review): the original literally contained "(unknown)" — it looks like a
# mangled "{filename}" placeholder (hf_hub formats this template with
# repo_id/revision/filename); confirm against upstream before relying on it.
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + '/{repo_id}/resolve/{revision}/{filename}'
CI_HUB_TOKEN_PATH = Path('~/.huggingface/hub_ci_token').expanduser()
# NOTE(review): every fixture below is named ``A__`` (pytest resolves fixtures
# by function name, so each needs its distinct upstream name), one signature
# repeats ``snake_case_`` (a SyntaxError), and the bodies read upstream names
# (``monkeypatch``, ``previous_token``, ``repo_id``, ``cleanup_repo``,
# ``hf_api``) that are no longer bound. These fixtures are interdependent;
# restore them together from the upstream ``tests/fixtures/hub.py``.
@pytest.fixture
def A__ ( snake_case_ : Optional[Any] ):
    # patches hf_hub's URL template to point at the CI hub
    monkeypatch.setattr(
        '''huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE''' , snake_case_ )
@pytest.fixture
def A__ ( snake_case_ : List[Any] ):
    # patches datasets' endpoint + datasets URL to point at the CI hub
    monkeypatch.setattr('''datasets.config.HF_ENDPOINT''' , snake_case_ )
    monkeypatch.setattr('''datasets.config.HUB_DATASETS_URL''' , snake_case_ )
@pytest.fixture
def A__ ( snake_case_ : Optional[Any] ):
    # patches where HfFolder stores its token on disk
    monkeypatch.setattr('''huggingface_hub.hf_api.HfFolder.path_token''' , snake_case_ )
@pytest.fixture
def A__ ( snake_case_ : List[Any] , snake_case_ : str ):
    # NOTE(review): duplicate parameter name — SyntaxError as written.
    HfFolder.save_token(snake_case_ )
    yield
    HfFolder.delete_token()
@pytest.fixture(scope='''session''' )
def A__ ( ):
    # session-scoped HfApi client; upstream passed ``endpoint=CI_HUB_ENDPOINT``
    return HfApi(endpoint=snake_case_ )
@pytest.fixture(scope='''session''' )
def A__ ( snake_case_ : HfApi ):
    # temporarily installs the CI token, restoring the previous one afterwards
    SCREAMING_SNAKE_CASE__: Optional[Any]= HfFolder.get_token()
    HfFolder.save_token(snake_case_ )
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(snake_case_ )
@pytest.fixture
def A__ ( snake_case_ : Union[str, Any] ):
    # returns a callable deleting a dataset repo
    def _cleanup_repo(snake_case_ : Dict ):
        hf_api.delete_repo(snake_case_ , token=snake_case_ , repo_type='''dataset''' )
    return _cleanup_repo
@pytest.fixture
def A__ ( snake_case_ : List[Any] ):
    # contextmanager that cleans the repo up even if the test body raises
    @contextmanager
    def _temporary_repo(snake_case_ : List[Any] ):
        try:
            yield repo_id
        finally:
            cleanup_repo(snake_case_ )
    return _temporary_repo
@pytest.fixture(scope='''session''' )
def A__ ( snake_case_ : HfApi , snake_case_ : Optional[Any] , snake_case_ : Dict ):
SCREAMING_SNAKE_CASE__: Optional[int]= F'repo_txt_data-{int(time.time() * 10E3 )}'
SCREAMING_SNAKE_CASE__: int= F'{CI_HUB_USER}/{repo_name}'
hf_api.create_repo(snake_case_ , token=snake_case_ , repo_type='''dataset''' , private=snake_case_ )
hf_api.upload_file(
token=snake_case_ , path_or_fileobj=str(snake_case_ ) , path_in_repo='''data/text_data.txt''' , repo_id=snake_case_ , repo_type='''dataset''' , )
yield repo_id
try:
hf_api.delete_repo(snake_case_ , token=snake_case_ , repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def A__ ( snake_case_ : List[Any] , snake_case_ : Dict , snake_case_ : Optional[int] ):
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope='''session''' )
def A__ ( snake_case_ : HfApi , snake_case_ : Tuple , snake_case_ : Optional[Any] ):
SCREAMING_SNAKE_CASE__: str= F'repo_zipped_txt_data-{int(time.time() * 10E3 )}'
SCREAMING_SNAKE_CASE__: int= F'{CI_HUB_USER}/{repo_name}'
hf_api.create_repo(snake_case_ , token=snake_case_ , repo_type='''dataset''' , private=snake_case_ )
hf_api.upload_file(
token=snake_case_ , path_or_fileobj=str(snake_case_ ) , path_in_repo='''data.zip''' , repo_id=snake_case_ , repo_type='''dataset''' , )
yield repo_id
try:
hf_api.delete_repo(snake_case_ , token=snake_case_ , repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def A__ ( snake_case_ : Optional[int] , snake_case_ : Optional[Any] , snake_case_ : List[str] ):
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope='''session''' )
def A__ ( snake_case_ : HfApi , snake_case_ : int , snake_case_ : Tuple ):
    """Session-scoped fixture: private Hub dataset repo holding a zipped image
    archive; yields its id, then best-effort deletes it.

    NOTE(review): same obfuscation damage as the fixtures above.
    """
    SCREAMING_SNAKE_CASE__: Tuple= F'repo_zipped_img_data-{int(time.time() * 10E3 )}'
    SCREAMING_SNAKE_CASE__: int= F'{CI_HUB_USER}/{repo_name}'
    hf_api.create_repo(snake_case_ , token=snake_case_ , repo_type='''dataset''' , private=snake_case_ )
    hf_api.upload_file(
        token=snake_case_ , path_or_fileobj=str(snake_case_ ) , path_in_repo='''data.zip''' , repo_id=snake_case_ , repo_type='''dataset''' , )
    yield repo_id
    # Best-effort cleanup.
    try:
        hf_api.delete_repo(snake_case_ , token=snake_case_ , repo_type='''dataset''' )
    except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
        pass
@pytest.fixture()
def A__ ( snake_case_ : Optional[Any] , snake_case_ : Dict , snake_case_ : List[Any] ):
    # Function-scoped wrapper around the session zipped-image-repo fixture.
    # NOTE(review): returned name is not bound by the obfuscated parameters.
    return hf_private_dataset_repo_zipped_img_data_
| 64 | from __future__ import annotations
from collections import deque
class _lowerCamelCase :
def __init__( self , lowerCAmelCase ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__: list[dict]= []
self.adlist.append(
{'''value''': '''''', '''next_states''': [], '''fail_state''': 0, '''output''': []} )
for keyword in keywords:
self.add_keyword(lowerCAmelCase )
self.set_fail_transitions()
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> int | None:
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def UpperCamelCase_ ( self , lowerCAmelCase ) -> None:
SCREAMING_SNAKE_CASE__: str= 0
for character in keyword:
SCREAMING_SNAKE_CASE__: Union[str, Any]= self.find_next_state(lowerCAmelCase , lowerCAmelCase )
if next_state is None:
self.adlist.append(
{
'''value''': character,
'''next_states''': [],
'''fail_state''': 0,
'''output''': [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
SCREAMING_SNAKE_CASE__: Dict= len(self.adlist ) - 1
else:
SCREAMING_SNAKE_CASE__: List[Any]= next_state
self.adlist[current_state]["output"].append(lowerCAmelCase )
def UpperCamelCase_ ( self ) -> None:
SCREAMING_SNAKE_CASE__: deque= deque()
for node in self.adlist[0]["next_states"]:
q.append(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[int]= 0
while q:
SCREAMING_SNAKE_CASE__: Union[str, Any]= q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[int]= self.adlist[r]['''fail_state''']
while (
self.find_next_state(lowerCAmelCase , self.adlist[child]['''value'''] ) is None
and state != 0
):
SCREAMING_SNAKE_CASE__: Tuple= self.adlist[state]['''fail_state''']
SCREAMING_SNAKE_CASE__: Dict= self.find_next_state(
lowerCAmelCase , self.adlist[child]['''value'''] )
if self.adlist[child]["fail_state"] is None:
SCREAMING_SNAKE_CASE__: Union[str, Any]= 0
SCREAMING_SNAKE_CASE__: str= (
self.adlist[child]['''output''']
+ self.adlist[self.adlist[child]['''fail_state''']]['''output''']
)
def UpperCamelCase_ ( self , lowerCAmelCase ) -> dict[str, list[int]]:
SCREAMING_SNAKE_CASE__: dict= {} # returns a dict with keywords and list of its occurrences
SCREAMING_SNAKE_CASE__: Optional[Any]= 0
for i in range(len(lowerCAmelCase ) ):
while (
self.find_next_state(lowerCAmelCase , string[i] ) is None
and current_state != 0
):
SCREAMING_SNAKE_CASE__: Optional[int]= self.adlist[current_state]['''fail_state''']
SCREAMING_SNAKE_CASE__: Optional[int]= self.find_next_state(lowerCAmelCase , string[i] )
if next_state is None:
SCREAMING_SNAKE_CASE__: List[Any]= 0
else:
SCREAMING_SNAKE_CASE__: Dict= next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
SCREAMING_SNAKE_CASE__: Optional[Any]= []
result[key].append(i - len(lowerCAmelCase ) + 1 )
return result
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 64 | 1 |
'''simple docstring'''
import math
import os
import sys
def read_file_binary(file_path: str) -> str:
    """Read the file at ``file_path`` and return its content as a bit-string.

    Each byte becomes its zero-padded 8-character binary representation.  On
    an OS-level failure the process prints a diagnostic and exits, matching
    the other CLI helpers in this module.

    Fixes vs. the previous revision: the def was one of six identically named
    functions (its callers use ``read_file_binary``), the formatted byte was
    assigned to a throwaway name while an undefined ``curr_byte`` was
    concatenated, and repeated ``+=`` made the loop quadratic.
    """
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        # "".join is linear in the output size, unlike repeated string +=.
        return "".join(f"{dat:08b}" for dat in data)
    except OSError:
        print("File not accessible")
        sys.exit()
def add_key_to_lexicon(lexicon: dict, curr_string: str, index: int, last_match_id: str) -> None:
    """Extend ``lexicon`` in place after ``curr_string`` has been emitted.

    ``curr_string`` is replaced by its two children: ``curr_string + "0"``
    keeps the code that was just emitted, ``curr_string + "1"`` receives the
    next free code ``index``.  When ``index`` reaches a power of two every
    stored code needs one more bit, so all existing codes are left-padded
    with "0".

    Fixes vs. the previous revision: every mutation was assigned to a
    throwaway name instead of a ``lexicon[...]`` entry, and the def shadowed
    its siblings; ``compress_data`` calls it as ``add_key_to_lexicon``.
    """
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id
    if math.log2(index).is_integer():
        # Code width grew by one bit: pad every existing code.
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]
    lexicon[curr_string + "1"] = bin(index)[2:]
def compress_data(data_bits: str) -> str:
    """Compress a bit-string using the LZW variant of this module.

    The lexicon starts with the single-bit phrases; every time the growing
    phrase ``curr_string`` is found in the lexicon its code is emitted and the
    lexicon is extended via ``add_key_to_lexicon``.  Any residue left at the
    end is zero-padded until it matches a known phrase.  Returns the
    concatenated codes as a bit-string.

    Fixes vs. the previous revision: locals were assigned to throwaway names
    (leaving ``lexicon``/``result``/``curr_string``/``index``/``last_match_id``
    undefined) and the def shadowed its siblings; ``compress`` calls it as
    ``compress_data``.
    """
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""
    # Flush residue: pad with zeros until it matches a known phrase.
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"
    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id
    return result
def add_file_length(source_path: str, compressed: str) -> str:
    """Prepend the original file size (in bytes) to ``compressed``.

    The size is stored Elias-gamma style: its binary form preceded by
    ``len(binary) - 1`` zeros, so a decoder can recover both the length field
    and the payload from a plain bit-stream.

    Fixes vs. the previous revision: intermediate values were assigned to a
    throwaway name (leaving ``file_length_binary``/``length_length``
    undefined) and the def shadowed its siblings; ``compress`` calls it as
    ``add_file_length``.
    """
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)
    return "0" * (length_length - 1) + file_length_binary + compressed
def write_file_binary(file_path: str, to_write: str) -> None:
    """Write the bit-string ``to_write`` to ``file_path`` as raw bytes.

    The stream is chopped into 8-bit groups.  A "1" marker followed by zero
    padding is appended so a decoder can strip the padding unambiguously; if
    the data already ends on a byte boundary a full "10000000" byte is added.
    On an OS-level failure the process prints a diagnostic and exits.

    Fixes vs. the previous revision: locals were assigned to a throwaway name
    (leaving ``byte_length``/``result_byte_array`` undefined), the def
    shadowed its siblings (``compress`` calls it as ``write_file_binary``),
    and an empty ``to_write`` raised IndexError -- it now writes just the
    padding marker byte.
    """
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if not result_byte_array:
                # Zero-length payload: emit only the padding marker byte.
                result_byte_array = ["10000000"]
            elif len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
def compress(source_path: str, destination_path: str) -> None:
    """Compress ``source_path`` with this module's LZW scheme and write the
    result (length header + payload) to ``destination_path``.

    Fixes vs. the previous revision: results were assigned to a throwaway
    name (leaving ``data_bits``/``compressed`` undefined) and the def shadowed
    its siblings; the ``__main__`` guard calls it as ``compress``.
    """
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
if __name__ == "__main__":
    # CLI usage: python <script> <source_file> <destination_file>
    compress(sys.argv[1], sys.argv[2])
| 4 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class lowerCamelCase__ ( unittest.TestCase ):
    """Model tester that builds RoFormer configs and dummy inputs for the
    Flax test suite.

    NOTE(review): obfuscation damage -- every ``__init__`` parameter is named
    ``UpperCamelCase_`` (duplicate parameter names are a SyntaxError) and
    every assignment targets the throwaway ``_lowercase`` instead of
    ``self.<attr>``; the right-hand names show the intended fields.  Verify
    against the original test file before relying on this class.
    """
    def __init__( self : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str]=13 , UpperCamelCase_ : Union[str, Any]=7 , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Any=True , UpperCamelCase_ : Dict=True , UpperCamelCase_ : List[str]=True , UpperCamelCase_ : int=99 , UpperCamelCase_ : Tuple=32 , UpperCamelCase_ : List[str]=5 , UpperCamelCase_ : Dict=4 , UpperCamelCase_ : Tuple=37 , UpperCamelCase_ : int="gelu" , UpperCamelCase_ : int=0.1 , UpperCamelCase_ : Dict=0.1 , UpperCamelCase_ : Any=512 , UpperCamelCase_ : List[Any]=16 , UpperCamelCase_ : Optional[int]=2 , UpperCamelCase_ : Tuple=0.02 , UpperCamelCase_ : Union[str, Any]=4 , ) -> Tuple:
        """Record the model hyper-parameters used to build test configs."""
        _lowercase : int = parent
        _lowercase : str = batch_size
        _lowercase : List[str] = seq_length
        _lowercase : Dict = is_training
        _lowercase : Optional[int] = use_attention_mask
        _lowercase : List[Any] = use_token_type_ids
        _lowercase : Union[str, Any] = use_labels
        _lowercase : Dict = vocab_size
        _lowercase : List[Any] = hidden_size
        _lowercase : Any = num_hidden_layers
        _lowercase : int = num_attention_heads
        _lowercase : Optional[int] = intermediate_size
        _lowercase : Any = hidden_act
        _lowercase : List[str] = hidden_dropout_prob
        _lowercase : Union[str, Any] = attention_probs_dropout_prob
        _lowercase : Optional[int] = max_position_embeddings
        _lowercase : int = type_vocab_size
        _lowercase : Any = type_sequence_label_size
        _lowercase : Any = initializer_range
        _lowercase : str = num_choices
    def __UpperCAmelCase ( self : str ) -> int:
        """Build a RoFormer config plus random input ids / attention mask /
        token type ids shaped (batch_size, seq_length)."""
        _lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _lowercase : int = None
        if self.use_attention_mask:
            _lowercase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
        _lowercase : Any = None
        if self.use_token_type_ids:
            _lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        _lowercase : str = RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def __UpperCAmelCase ( self : List[Any] ) -> int:
        """Repackage prepare_config_and_inputs() into the dict form the common
        Flax test mixin consumes."""
        _lowercase : Dict = self.prepare_config_and_inputs()
        _lowercase , _lowercase , _lowercase , _lowercase : Union[str, Any] = config_and_inputs
        _lowercase : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
@require_flax
class lowerCamelCase__ ( A , unittest.TestCase ):
    """Common-mixin test class covering every Flax RoFormer head.

    NOTE(review): obfuscation damage -- the base ``A`` is undefined
    (presumably ``FlaxModelTesterMixin``, which IS imported above), both class
    attributes are assigned to the same name ``A_`` (so only the tuple
    survives), and the tester instance is stored in a throwaway local instead
    of ``self.model_tester``.  Verify against the original test file.
    """
    A_ = True
    A_ = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def __UpperCAmelCase ( self : str ) -> int:
        """Test setUp: instantiate the model tester helper."""
        _lowercase : Tuple = FlaxRoFormerModelTester(self )
    @slow
    def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
        """Smoke test: each head class loads pretrained weights and runs a
        forward pass on a 1x1 dummy input."""
        for model_class_name in self.all_model_classes:
            _lowercase : Optional[int] = model_class_name.from_pretrained('junnyu/roformer_chinese_small' , from_pt=UpperCamelCase_ )
            _lowercase : str = model(np.ones((1, 1) ) )
            self.assertIsNotNone(UpperCamelCase_ )
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
    """Integration test: pinned logits for the pretrained Chinese RoFormer MLM.

    NOTE(review): obfuscation damage -- intermediate values are stored in the
    throwaway ``_lowercase`` instead of named locals, so the assertions below
    reference names (``output``, ``vocab_size``) that are never bound.
    """
    @slow
    def __UpperCAmelCase ( self : List[str] ) -> List[Any]:
        """Run a fixed 6-token input through the base checkpoint and compare
        the output shape and a 3x3 logits slice against recorded values."""
        _lowercase : Dict = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
        _lowercase : Any = jnp.array([[0, 1, 2, 3, 4, 5]] )
        _lowercase : int = model(UpperCamelCase_ )[0]
        _lowercase : Union[str, Any] = 5_0000
        _lowercase : str = (1, 6, vocab_size)
        self.assertEqual(output.shape , UpperCamelCase_ )
        _lowercase : int = jnp.array(
            [[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]] )
        self.assertTrue(jnp.allclose(output[:, :3, :3] , UpperCamelCase_ , atol=1E-4 ) )
| 4 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
_lowerCAmelCase = logging.get_logger(__name__)
class __A ( a ):
    """Deprecated feature-extractor shim: behaves exactly like the parent
    image processor but warns callers to migrate to ``BeitImageProcessor``."""

    def __init__(self, *args, **kwargs) -> None:
        """Forward all arguments to the parent after emitting the warning.

        Fixes vs. the previous revision: the star parameters were both named
        ``_lowerCamelCase`` (duplicate parameter names are a SyntaxError) and
        the kwargs dict itself was passed as the ``warnings.warn`` category,
        which raises TypeError -- the category must be a Warning subclass
        (``FutureWarning`` for deprecations).
        """
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 161 |
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
class SchedulerType(a):
    """Names of the learning-rate schedules understood by ``get_scheduler``.

    Fixes vs. the previous revision: all seven members were assigned to the
    same attribute ``A_`` (only the last survived) and the class was named
    ``__A``, while the dispatch table and ``get_scheduler`` below reference
    ``SchedulerType.LINEAR`` etc.  NOTE(review): the base ``a`` is undefined
    in this view (presumably a string-enum base such as ``ExplicitEnum``) --
    verify against the original module.
    """

    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """Schedule that keeps the optimizer's learning rate constant.

    Args:
        optimizer: The optimizer whose learning rate is scheduled.
        last_epoch: Index of the last epoch when resuming training.

    Returns:
        ``torch.optim.lr_scheduler.LambdaLR`` with a constant multiplier of 1.

    Fixes vs. the previous revision: both parameters were named ``lowercase``
    (duplicate parameter names are a SyntaxError) and the def shadowed its
    siblings; the dispatch table references it as ``get_constant_schedule``.
    """
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """Constant learning rate after a linear warmup over ``num_warmup_steps``.

    Args:
        optimizer: The optimizer whose learning rate is scheduled.
        num_warmup_steps: Number of steps to ramp linearly from 0 to the base lr.
        last_epoch: Index of the last epoch when resuming training.

    Returns:
        ``torch.optim.lr_scheduler.LambdaLR`` with the appropriate schedule.

    Fixes vs. the previous revision: duplicate ``lowercase`` parameter names
    (SyntaxError) and sibling-shadowing def name; restored to the name the
    dispatch table references.
    """
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """Piecewise-constant multiplier schedule parsed from a rule string.

    ``step_rules`` looks like ``"1:10,20:0.1,0.01"``: multiplier 10 while
    ``step < 1``, 0.1 while ``step < 20``, and 0.01 afterwards (the trailing
    bare number is the final multiplier).

    Args:
        optimizer: The optimizer whose learning rate is scheduled.
        step_rules: Comma-separated ``step:multiplier`` rules plus a final
            default multiplier.
        last_epoch: Index of the last epoch when resuming training.

    Returns:
        ``torch.optim.lr_scheduler.LambdaLR`` with the appropriate schedule.

    Fixes vs. the previous revision: duplicate ``lowercase`` parameter names
    (SyntaxError), parsed values assigned to throwaway names (so
    ``rules_dict``/``steps``/``value`` were undefined), and the def shadowed
    its siblings.
    """
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, lr_str = rule_str.split(":")
        steps = int(value_str)
        lr_multiple = float(lr_str)
        rules_dict[steps] = lr_multiple
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            # First rule whose threshold exceeds the current step wins.
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup from 0 to the base lr, then linear decay back to 0.

    Args:
        optimizer: The optimizer whose learning rate is scheduled.
        num_warmup_steps: Steps of linear warmup.
        num_training_steps: Total training steps; the lr reaches 0 here.
        last_epoch: Index of the last epoch when resuming training.

    Returns:
        ``torch.optim.lr_scheduler.LambdaLR`` with the appropriate schedule.

    Fixes vs. the previous revision: duplicate ``lowercase`` parameter names
    (SyntaxError) and sibling-shadowing def name.
    """
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0,
            float(num_training_steps - current_step)
            / float(max(1, num_training_steps - num_warmup_steps)),
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1):
    """Linear warmup, then cosine decay following ``num_cycles`` half-waves.

    With the default ``num_cycles=0.5`` the lr decreases from the base value
    to 0 over the post-warmup steps along half a cosine period.

    Args:
        optimizer: The optimizer whose learning rate is scheduled.
        num_warmup_steps: Steps of linear warmup.
        num_training_steps: Total training steps.
        num_cycles: Number of cosine waves (0.5 = half-cosine decay to 0).
        last_epoch: Index of the last epoch when resuming training.

    Returns:
        ``torch.optim.lr_scheduler.LambdaLR`` with the appropriate schedule.

    Fixes vs. the previous revision: duplicate ``lowercase`` parameter names
    (SyntaxError), ``progress`` assigned to a throwaway name, and a
    sibling-shadowing def name.
    """
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1):
    """Linear warmup, then cosine decay with ``num_cycles`` hard restarts.

    After warmup the multiplier follows a cosine from 1 to 0 and snaps back
    to 1 at each restart boundary; past ``num_training_steps`` it is 0.

    Args:
        optimizer: The optimizer whose learning rate is scheduled.
        num_warmup_steps: Steps of linear warmup.
        num_training_steps: Total training steps.
        num_cycles: Number of hard restarts.
        last_epoch: Index of the last epoch when resuming training.

    Returns:
        ``torch.optim.lr_scheduler.LambdaLR`` with the appropriate schedule.

    Fixes vs. the previous revision: duplicate ``lowercase`` parameter names
    (SyntaxError), ``progress`` assigned to a throwaway name, and a
    sibling-shadowing def name.
    """
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        # Modulo folds progress into the current cycle before the cosine.
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1):
    """Linear warmup, then polynomial decay from the optimizer's base lr down
    to ``lr_end``.

    Args:
        optimizer: The optimizer whose learning rate is scheduled.
        num_warmup_steps: Steps of linear warmup.
        num_training_steps: Total training steps; lr stays at ``lr_end`` after.
        lr_end: Final learning rate.
        power: Polynomial power (1.0 = linear decay).
        last_epoch: Index of the last epoch when resuming training.

    Raises:
        ValueError: If ``lr_end`` is not below the optimizer's initial lr.

    Returns:
        ``torch.optim.lr_scheduler.LambdaLR`` with the appropriate schedule.

    Fixes vs. the previous revision: duplicate ``lowercase`` parameter names
    (SyntaxError), locals assigned to throwaway names, a sibling-shadowing
    def name, and a typo in the error message ("must be be").
    """
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
# Dispatch table mapping each schedule name to its factory function, consumed
# by the getter below.
# NOTE(review): obfuscation damage -- the ``SchedulerType`` members and
# ``get_*`` functions referenced here are not defined under these names by the
# obfuscated definitions above; verify against the original module.
_lowerCAmelCase = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified entry point: build the named LR schedule for ``optimizer``.

    Args:
        name: Schedule name (string or ``SchedulerType`` member).
        optimizer: The optimizer whose learning rate is scheduled.
        step_rules: Rule string, only for the piecewise-constant schedule.
        num_warmup_steps: Warmup steps, required by every warmup schedule.
        num_training_steps: Total steps, required by the decaying schedules.
        num_cycles: Hard-restart count for the cosine-with-restarts schedule.
        power: Power for the polynomial schedule.
        last_epoch: Index of the last epoch when resuming training.

    Raises:
        ValueError: When a schedule's required argument is missing.

    Fixes vs. the previous revision: every parameter was named ``lowercase``
    (duplicate parameter names are a SyntaxError, and all the keyword
    forwarding below was therefore meaningless) and the def shadowed the
    seven factory functions above.
    """
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, num_cycles=num_cycles, last_epoch=last_epoch, )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, power=power, last_epoch=last_epoch, )
    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch)
| 161 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowerCamelCase ( _UpperCAmelCase ):
    """Unconditional audio-generation diffusion pipeline: iteratively denoises
    random noise with a UNet under a scheduler and returns raw waveforms.

    NOTE(review): obfuscation damage -- parameters repeat the same name
    (a SyntaxError) and results are stored in the throwaway ``lowerCAmelCase__``
    instead of named locals, so later references (``audio_length_in_s``,
    ``sample_size``, ``audio`` ...) are unbound.  The keyword names in the
    calls show the intended values.
    """
    def __init__( self : Any , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Dict:
        """Register the two components (intended: unet, scheduler)."""
        super().__init__()
        self.register_modules(unet=lowerCamelCase_ , scheduler=lowerCamelCase_ )
    @torch.no_grad()
    def __call__( self : Dict , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : int = 100 , SCREAMING_SNAKE_CASE__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , SCREAMING_SNAKE_CASE__ : Optional[float] = None , SCREAMING_SNAKE_CASE__ : bool = True , ) -> Union[AudioPipelineOutput, Tuple]:
        """Generate audio (intended args: batch_size, num_inference_steps,
        generator, audio_length_in_s, return_dict).

        The requested length is rounded up to a multiple of the UNet's
        down-scale factor, denoised from Gaussian noise, then trimmed back to
        the originally requested sample count.
        """
        if audio_length_in_s is None:
            # Default to the UNet's native sample length (in seconds).
            lowerCAmelCase__ = self.unet.config.sample_size / self.unet.config.sample_rate
        lowerCAmelCase__ = audio_length_in_s * self.unet.config.sample_rate
        # Each up-block halves/doubles resolution, so inputs must divide evenly.
        lowerCAmelCase__ = 2 ** len(self.unet.up_blocks )
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f'{audio_length_in_s} is too small. Make sure it\'s bigger or equal to'
                f' {3 * down_scale_factor / self.unet.config.sample_rate}.' )
        lowerCAmelCase__ = int(lowerCamelCase_ )
        if sample_size % down_scale_factor != 0:
            # Round up to the next multiple of the down-scale factor.
            lowerCAmelCase__ = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f'{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'
                f' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'
                " process." )
        lowerCAmelCase__ = int(lowerCamelCase_ )
        lowerCAmelCase__ = next(iter(self.unet.parameters() ) ).dtype
        lowerCAmelCase__ = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and len(lowerCamelCase_ ) != batch_size:
            raise ValueError(
                f'You have passed a list of generators of length {len(lowerCamelCase_ )}, but requested an effective batch'
                f' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
        lowerCAmelCase__ = randn_tensor(lowerCamelCase_ , generator=lowerCamelCase_ , device=self.device , dtype=lowerCamelCase_ )
        # set step values
        self.scheduler.set_timesteps(lowerCamelCase_ , device=audio.device )
        lowerCAmelCase__ = self.scheduler.timesteps.to(lowerCamelCase_ )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            lowerCAmelCase__ = self.unet(lowerCamelCase_ , lowerCamelCase_ ).sample
            # 2. compute previous image: x_t -> t_t-1
            lowerCAmelCase__ = self.scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample
        lowerCAmelCase__ = audio.clamp(-1 , 1 ).float().cpu().numpy()
        # Trim any padding added above back to the requested length.
        lowerCAmelCase__ = audio[:, :, :original_sample_size]
        if not return_dict:
            return (audio,)
        return AudioPipelineOutput(audios=lowerCamelCase_ )
| 718 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
UpperCamelCase = logging.getLogger(__name__)
class __lowerCamelCase ( UpperCamelCase__ ):
    """Token-classification task reader for CoNLL-style NER files (one token
    per line, blank lines / -DOCSTART- separating sentences).

    NOTE(review): obfuscation damage -- the three task methods are all named
    ``a`` (later defs shadow earlier ones; bodies show the intended
    read-examples / write-predictions / get-labels roles) and ``__init__``
    assigns to a throwaway name instead of ``self.label_idx``.
    """
    def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any]=-1 ) -> str:
        # in NER datasets, the last column is usually reserved for NER label
        lowerCAmelCase__ = label_idx
    def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[Split, str] ) -> List[InputExample]:
        """Read ``{mode}.txt`` from a data dir and build InputExamples, one per
        sentence; missing labels (test mode) default to "O"."""
        if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
            lowerCAmelCase__ = mode.value
        lowerCAmelCase__ = os.path.join(SCREAMING_SNAKE_CASE__ , f'{mode}.txt' )
        lowerCAmelCase__ = 1
        lowerCAmelCase__ = []
        with open(SCREAMING_SNAKE_CASE__ , encoding="utf-8" ) as f:
            lowerCAmelCase__ = []
            lowerCAmelCase__ = []
            for line in f:
                if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
                    # Sentence boundary: flush accumulated words/labels.
                    if words:
                        examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) )
                        guid_index += 1
                        lowerCAmelCase__ = []
                        lowerCAmelCase__ = []
                else:
                    lowerCAmelCase__ = line.split(" " )
                    words.append(splits[0] )
                    if len(SCREAMING_SNAKE_CASE__ ) > 1:
                        labels.append(splits[self.label_idx].replace("\n" , "" ) )
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O" )
            if words:
                # Flush the final sentence if the file lacks a trailing blank line.
                examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) )
        return examples
    def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : TextIO , SCREAMING_SNAKE_CASE__ : TextIO , SCREAMING_SNAKE_CASE__ : List ) -> Dict:
        """Write predictions next to the original tokens, consuming one
        prediction per token and warning when a sentence was truncated."""
        lowerCAmelCase__ = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
                writer.write(SCREAMING_SNAKE_CASE__ )
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                lowerCAmelCase__ = line.split()[0] + " " + preds_list[example_id].pop(0 ) + "\n"
                writer.write(SCREAMING_SNAKE_CASE__ )
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'." , line.split()[0] )
    def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : str ) -> List[str]:
        """Load labels from a file (ensuring "O" is present) or fall back to
        the default CoNLL-2003 NER label set."""
        if path:
            with open(SCREAMING_SNAKE_CASE__ , "r" ) as f:
                lowerCAmelCase__ = f.read().splitlines()
            if "O" not in labels:
                lowerCAmelCase__ = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class __lowerCamelCase ( UpperCamelCase__ ):
    """Chunking variant of the NER task reader: reads the chunk column of
    CoNLL-2003 files instead of the NER column."""
    def __init__( self : Dict ) -> List[str]:
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2 )
    def a ( self : int , SCREAMING_SNAKE_CASE__ : str ) -> List[str]:
        """Load chunk labels from a file (ensuring "O" is present) or fall
        back to the default CoNLL-2003 chunking label set."""
        if path:
            with open(SCREAMING_SNAKE_CASE__ , "r" ) as f:
                lowerCAmelCase__ = f.read().splitlines()
            if "O" not in labels:
                lowerCAmelCase__ = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]
class __lowerCamelCase ( UpperCamelCase__ ):
    """POS-tagging task reader backed by CoNLL-U files parsed with ``conllu``.

    NOTE(review): same obfuscation damage as the sibling classes -- methods
    named ``a`` shadow each other and results are assigned to throwaway names.
    """
    def a ( self : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[Split, str] ) -> List[InputExample]:
        """Read ``{mode}.txt`` as CoNLL-U and emit one InputExample per
        sentence, pairing each surface form with its UPOS tag."""
        if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
            lowerCAmelCase__ = mode.value
        lowerCAmelCase__ = os.path.join(SCREAMING_SNAKE_CASE__ , f'{mode}.txt' )
        lowerCAmelCase__ = 1
        lowerCAmelCase__ = []
        with open(SCREAMING_SNAKE_CASE__ , encoding="utf-8" ) as f:
            for sentence in parse_incr(SCREAMING_SNAKE_CASE__ ):
                lowerCAmelCase__ = []
                lowerCAmelCase__ = []
                for token in sentence:
                    words.append(token["form"] )
                    labels.append(token["upos"] )
                assert len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ )
                if words:
                    examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) )
                    guid_index += 1
        return examples
    def a ( self : int , SCREAMING_SNAKE_CASE__ : TextIO , SCREAMING_SNAKE_CASE__ : TextIO , SCREAMING_SNAKE_CASE__ : List ) -> int:
        """Write predictions as ``form (gold|pred)`` per token, one sentence
        per line, consuming one prediction list per sentence."""
        lowerCAmelCase__ = 0
        for sentence in parse_incr(SCREAMING_SNAKE_CASE__ ):
            lowerCAmelCase__ = preds_list[example_id]
            lowerCAmelCase__ = ""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '
            out += "\n"
            writer.write(SCREAMING_SNAKE_CASE__ )
            example_id += 1
    def a ( self : Any , SCREAMING_SNAKE_CASE__ : str ) -> List[str]:
        """Load POS labels from a file or fall back to the 17 universal tags."""
        if path:
            with open(SCREAMING_SNAKE_CASE__ , "r" ) as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
| 125 | 0 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
# NOTE(review): obfuscation damage -- the logger and the pretrained-config
# archive map are both bound to ``__snake_case``, so the dict overwrites the
# logger; the ``logger.warning`` calls below therefore cannot resolve.
__snake_case : str = logging.get_logger(__name__)
__snake_case : Optional[int] = {
    'salesforce/blip2-opt-2.7b': 'https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json',
}
class UpperCamelCase ( a ):
    """Configuration for the BLIP-2 vision encoder (ViT-style tower).

    NOTE(review): obfuscation damage -- ``__init__`` repeats the parameter
    name ``_lowerCamelCase`` (a SyntaxError) and assigns hyper-parameters to
    the throwaway ``A__``; the right-hand names show the intended attributes.
    """
    _lowerCamelCase : Union[str, Any] ="blip_2_vision_model"
    def __init__( self : Any , _lowerCamelCase : Tuple=1_4_0_8 , _lowerCamelCase : Optional[int]=6_1_4_4 , _lowerCamelCase : Any=3_9 , _lowerCamelCase : Optional[int]=1_6 , _lowerCamelCase : Union[str, Any]=2_2_4 , _lowerCamelCase : Tuple=1_4 , _lowerCamelCase : Union[str, Any]="gelu" , _lowerCamelCase : int=0.00_001 , _lowerCamelCase : Any=0.0 , _lowerCamelCase : Union[str, Any]=1E-10 , _lowerCamelCase : List[Any]=True , **_lowerCamelCase : Optional[Any] , ):
        """Store the vision-tower hyper-parameters."""
        super().__init__(**_lowerCamelCase )
        A__ = hidden_size
        A__ = intermediate_size
        A__ = num_hidden_layers
        A__ = num_attention_heads
        A__ = patch_size
        A__ = image_size
        A__ = initializer_range
        A__ = attention_dropout
        A__ = layer_norm_eps
        A__ = hidden_act
        A__ = qkv_bias
    @classmethod
    def A__ ( cls : List[str] , _lowerCamelCase : Union[str, os.PathLike] , **_lowerCamelCase : int ):
        """Load a config dict from a checkpoint; when loading a composite
        "blip-2" config, pull out the nested vision section first."""
        cls._set_token_in_kwargs(_lowerCamelCase )
        A__ , A__ = cls.get_config_dict(_lowerCamelCase , **_lowerCamelCase )
        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get('''model_type''' ) == "blip-2":
            A__ = config_dict['''vision_config''']
        if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(_lowerCamelCase , **_lowerCamelCase )
class UpperCamelCase ( a ):
    """Configuration for the BLIP-2 Q-Former (the query transformer bridging
    vision features to the language model).

    NOTE(review): same obfuscation damage as the vision config above --
    duplicate ``_lowerCamelCase`` parameters and throwaway ``A__`` targets.
    """
    _lowerCamelCase : Tuple ="blip_2_qformer"
    def __init__( self : Union[str, Any] , _lowerCamelCase : int=3_0_5_2_2 , _lowerCamelCase : List[str]=7_6_8 , _lowerCamelCase : Union[str, Any]=1_2 , _lowerCamelCase : Tuple=1_2 , _lowerCamelCase : Any=3_0_7_2 , _lowerCamelCase : Optional[Any]="gelu" , _lowerCamelCase : List[Any]=0.1 , _lowerCamelCase : Any=0.1 , _lowerCamelCase : Optional[Any]=5_1_2 , _lowerCamelCase : Dict=0.02 , _lowerCamelCase : Optional[int]=1E-12 , _lowerCamelCase : Union[str, Any]=0 , _lowerCamelCase : List[str]="absolute" , _lowerCamelCase : Dict=2 , _lowerCamelCase : Tuple=1_4_0_8 , **_lowerCamelCase : Any , ):
        """Store the Q-Former hyper-parameters (BERT-like plus cross-attention
        frequency and encoder hidden size)."""
        super().__init__(pad_token_id=_lowerCamelCase , **_lowerCamelCase )
        A__ = vocab_size
        A__ = hidden_size
        A__ = num_hidden_layers
        A__ = num_attention_heads
        A__ = hidden_act
        A__ = intermediate_size
        A__ = hidden_dropout_prob
        A__ = attention_probs_dropout_prob
        A__ = max_position_embeddings
        A__ = initializer_range
        A__ = layer_norm_eps
        A__ = position_embedding_type
        A__ = cross_attention_frequency
        A__ = encoder_hidden_size
    @classmethod
    def A__ ( cls : Any , _lowerCamelCase : Union[str, os.PathLike] , **_lowerCamelCase : Optional[int] ):
        """Load a config dict from a checkpoint; when loading a composite
        "blip-2" config, pull out the nested qformer section first."""
        cls._set_token_in_kwargs(_lowerCamelCase )
        A__ , A__ = cls.get_config_dict(_lowerCamelCase , **_lowerCamelCase )
        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get('''model_type''' ) == "blip-2":
            A__ = config_dict['''qformer_config''']
        if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(_lowerCamelCase , **_lowerCamelCase )
class UpperCamelCase ( a ):
    """Composite BLIP-2 configuration bundling vision, Q-Former and text
    sub-configs plus the query-token count.

    NOTE(review): same obfuscation damage as the sibling configs --
    duplicate ``_lowerCamelCase`` parameters and throwaway ``A__`` targets; the
    sub-config class names (``BlipaVisionConfig`` etc.) are also unresolved in
    this view.
    """
    _lowerCamelCase : Tuple ="blip-2"
    _lowerCamelCase : Optional[Any] =True
    def __init__( self : Dict , _lowerCamelCase : List[str]=None , _lowerCamelCase : Any=None , _lowerCamelCase : Dict=None , _lowerCamelCase : Dict=3_2 , **_lowerCamelCase : str ):
        """Build the three sub-configs (defaulting any that are missing) and
        derive the composite attributes from them."""
        super().__init__(**_lowerCamelCase )
        if vision_config is None:
            A__ = {}
            logger.info('''vision_config is None. initializing the Blip2VisionConfig with default values.''' )
        if qformer_config is None:
            A__ = {}
            logger.info('''qformer_config is None. Initializing the Blip2QFormerConfig with default values.''' )
        if text_config is None:
            A__ = {}
            logger.info('''text_config is None. Initializing the text config with default values (`OPTConfig`).''' )
        A__ = BlipaVisionConfig(**_lowerCamelCase )
        A__ = BlipaQFormerConfig(**_lowerCamelCase )
        # The text backbone defaults to OPT when no model_type is given.
        A__ = text_config['''model_type'''] if '''model_type''' in text_config else '''opt'''
        A__ = CONFIG_MAPPING[text_model_type](**_lowerCamelCase )
        A__ = self.text_config.tie_word_embeddings
        A__ = self.text_config.is_encoder_decoder
        A__ = num_query_tokens
        A__ = self.vision_config.hidden_size
        A__ = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        A__ = 1.0
        A__ = 0.02
    @classmethod
    def A__ ( cls : int , _lowerCamelCase : BlipaVisionConfig , _lowerCamelCase : BlipaQFormerConfig , _lowerCamelCase : PretrainedConfig , **_lowerCamelCase : Optional[Any] , ):
        """Alternate constructor: build the composite config from already
        instantiated sub-configs."""
        return cls(
            vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **_lowerCamelCase , )
    def A__ ( self : int ):
        """Serialize to a plain dict, expanding each sub-config."""
        A__ = copy.deepcopy(self.__dict__ )
        A__ = self.vision_config.to_dict()
        A__ = self.qformer_config.to_dict()
        A__ = self.text_config.to_dict()
        A__ = self.__class__.model_type
        return output
| 571 |
"""simple docstring"""
from collections import namedtuple
# Conversion factors into and out of the pivot unit (cubic metre):
# value_in_m3 = value * from_; value_in_target = value_in_m3 * to.
from_to = namedtuple('from_to', 'from_ to')

METRIC_CONVERSION = {
    'cubicmeter': from_to(1, 1),
    'litre': from_to(0.001, 1_000),
    'kilolitre': from_to(1, 1),
    'gallon': from_to(0.00_454, 264.172),
    'cubicyard': from_to(0.76_455, 1.30_795),
    'cubicfoot': from_to(0.028, 35.3_147),
    'cup': from_to(0.000_236_588, 4_226.75),
}


def a_(value: float, from_type: str, to_type: str) -> float:
    """Convert a volume ``value`` from ``from_type`` to ``to_type`` via the
    cubic-metre pivot table above.

    Raises:
        ValueError: If either unit name is not in ``METRIC_CONVERSION``.

    Fixes vs. the previous revision: all three parameters were named ``__a``
    (duplicate parameter names are a SyntaxError), and the namedtuple factory
    and the conversion table were both bound to ``__snake_case``, leaving the
    ``from_to`` calls and the ``METRIC_CONVERSION`` lookups undefined.
    """
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f'''Invalid \'from_type\' value: {from_type!r} Supported values are:\n'''
            + ''', '''.join(METRIC_CONVERSION) )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f'''Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'''
            + ''', '''.join(METRIC_CONVERSION) )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
| 571 | 1 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
# Module logger (name restored: `logger` is never defined but is the conventional
# name for transformers.utils.logging.get_logger results).
logger = logging.get_logger(__name__)

# (parlai_name, hf_name) substring substitutions applied by rename_state_dict_key.
# Name restored from the `for parlai_name, hf_name in PATTERNS:` loop below.
PATTERNS = [
    ['''attention''', '''attn'''],
    ['''encoder_attention''', '''encoder_attn'''],
    ['''q_lin''', '''q_proj'''],
    ['''k_lin''', '''k_proj'''],
    ['''v_lin''', '''v_proj'''],
    ['''out_lin''', '''out_proj'''],
    ['''norm_embeddings''', '''layernorm_embedding'''],
    ['''position_embeddings''', '''embed_positions'''],
    ['''embeddings''', '''embed_tokens'''],
    ['''ffn.lin''', '''fc'''],
]
def rename_state_dict_key(k):
    """Map a ParlAI state-dict key to the Transformers Blenderbot naming scheme.

    Fix: parameter was named ``A__`` while the body used ``k`` (NameError), and the
    PATTERNS replacement loop replaced the key with itself. Function name restored
    from the call in convert_parlai_checkpoint.
    """
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("""encoder"""):
        k = k.replace(""".attn""", """.self_attn""")
        k = k.replace("""norm1""", """self_attn_layer_norm""")
        k = k.replace("""norm2""", """final_layer_norm""")
    elif k.startswith("""decoder"""):
        k = k.replace("""norm1""", """self_attn_layer_norm""")
        k = k.replace("""norm2""", """encoder_attn_layer_norm""")
        k = k.replace("""norm3""", """final_layer_norm""")
    return k
def rename_layernorm_keys(sd):
    """In-place rename of layernorm_embedding -> layer_norm keys in a state dict.

    Used for Blenderbot-3B checkpoints. Fix: pop/replace/store locals were all
    collapsed to ``_snake_case`` so the dict was never updated; name restored from
    the call in convert_parlai_checkpoint.
    """
    keys = [
        """model.encoder.layernorm_embedding.weight""",
        """model.encoder.layernorm_embedding.bias""",
        """model.decoder.layernorm_embedding.weight""",
        """model.decoder.layernorm_embedding.bias""",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("""layernorm_embedding""", """layer_norm""")
        assert new_k not in sd
        sd[new_k] = v
UpperCAmelCase_ = ['''START''']
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Convert a ParlAI Blenderbot checkpoint into a Transformers checkpoint.

    Args:
        checkpoint_path: path to the ParlAI ``blenderbot-model.bin``.
        pytorch_dump_folder_path: output directory for ``save_pretrained``.
        config_json_path: path to a BlenderbotConfig JSON file.

    Fix: every local was collapsed to ``_snake_case`` so ``sd``/``m``/``mapping``
    were never defined; function name restored from the call under ``__main__``.
    """
    model = torch.load(checkpoint_path, map_location="""cpu""")
    sd = model["""model"""]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    # NOTE(review): the strict flag was mangled; the upstream converter loads with
    # strict=True — confirm against git history.
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--src_path''', type=str, help='''like blenderbot-model.bin''')
parser.add_argument('''--save_dir''', default='''hf_blenderbot''', type=str, help='''Where to save converted model.''')
parser.add_argument(
'''--hf_config_json''', default='''blenderbot-3b-config.json''', type=str, help='''Path to config to use'''
)
UpperCAmelCase_ = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 705 |
from math import factorial


def binomial_distribution(successes, trials, prob):
    """Return P(X = successes) for X ~ Binomial(trials, prob).

    Raises:
        ValueError: if successes > trials, either count is negative or non-integer,
            or prob is outside (0, 1).

    Fix: the mangled signature reused one name for all three parameters (SyntaxError)
    and locals were collapsed; function name restored from the call under __main__.
    """
    if successes > trials:
        raise ValueError("""successes must be lower or equal to trials""")
    if trials < 0 or successes < 0:
        raise ValueError("""the function is defined for non-negative integers""")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("""the function is defined for non-negative integers""")
    if not 0 < prob < 1:
        raise ValueError("""prob has to be in range of 1 - 0""")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print('''Probability of 2 successes out of 4 trails''')
    print('''with probability of 0.75 is:''', end=''' ''')
    print(binomial_distribution(2, 4, 0.75))
| 519 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def _a (lowercase__ : list ) -> int:
"""simple docstring"""
if not postfix_notation:
return 0
__snake_case = {'+', '-', '*', '/'}
__snake_case = []
for token in postfix_notation:
if token in operations:
__snake_case , __snake_case = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(lowercase__ ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 56 |
'''simple docstring'''
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def __UpperCAmelCase ( lowerCamelCase_) -> str:
UpperCamelCase__ : Union[str, Any] = []
for line in lines:
UpperCamelCase__ : str = re.sub(R'#.*' , '' , lowerCamelCase_) # remove comments
if line:
filtered_lines.append(lowerCamelCase_)
UpperCamelCase__ : Union[str, Any] = '\n'.join(lowerCamelCase_)
# Make a hash from all this code
UpperCamelCase__ : Union[str, Any] = full_str.encode('utf-8')
return shaaaa(lowerCamelCase_).hexdigest()
# get importable module names and hash for caching
# NOTE(review): this table's name is not referenced in the visible chunk; restored
# from the upstream `datasets` packaged_modules source — confirm.
_PACKAGED_DATASETS_MODULES = {
    'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}

# Used to infer the module to use based on the data files extensions
# (name restored from the `.update(...)` calls below, which referenced it).
_EXTENSION_TO_MODULE = {
    '.csv': ('csv', {}),
    '.tsv': ('csv', {'sep': '\t'}),
    '.json': ('json', {}),
    '.jsonl': ('json', {}),
    '.parquet': ('parquet', {}),
    '.arrow': ('arrow', {}),
    '.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})

# NOTE(review): name reconstructed from upstream `datasets` — confirm.
_MODULES_SUPPORTING_METADATA = {'imagefolder', 'audiofolder'}

# Used to filter data files based on extensions given a module name
# (name restored from the `.setdefault`/`.append` statements below).
_MODULE_TO_EXTENSIONS = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)

_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
| 596 | 0 |
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _lowerCAmelCase(PipelineTool):
    """Tool producing a binary segmentation mask for an image given a text label (CLIPSeg).

    Fixes: the base class ``_UpperCAmelCase`` was undefined (restored to the imported
    ``PipelineTool``); all three processing methods shared the mangled name
    ``snake_case__`` so later definitions clobbered earlier ones (restored to the
    PipelineTool ``encode``/``forward``/``decode`` API); method bodies referenced the
    undefined ``_SCREAMING_SNAKE_CASE``; ``np.uinta`` restored to ``np.uint8``.
    """

    # NOTE(review): attribute names follow the PipelineTool API — confirm.
    description = (
        """This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."""
        """It takes two arguments named `image` which should be the original image, and `label` which should be a text """
        """describing the elements what should be identified in the segmentation mask. The tool returns the mask."""
    )
    default_checkpoint = """CIDAS/clipseg-rd64-refined"""
    name = """image_segmenter"""
    model_class = CLIPSegForImageSegmentation

    inputs = ["""image""", """text"""]
    outputs = ["""image"""]

    def __init__(self, *args, **kwargs):
        """Require the vision backend before normal PipelineTool setup."""
        requires_backends(self, ["""vision"""])
        super().__init__(*args, **kwargs)

    def encode(self, image, label):
        """Preprocess the (image, label) pair into model tensors."""
        # NOTE(review): the padding flag was mangled; upstream uses padding=True — confirm.
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="""pt""")

    def forward(self, inputs):
        """Run CLIPSeg and return the raw logits."""
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        """Threshold the logits at 0 and return a binary PIL mask image."""
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
| 713 |
def solution(n: int = 100) -> int:
    """Project Euler 6: difference between the square of the sum and the sum of
    squares of the first ``n`` natural numbers.

    Fix: both intermediates were bound to the same mangled name while the return
    referenced ``square_of_sum``/``sum_of_squares``; function name restored from the
    call under ``__main__``.
    """
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)


if __name__ == "__main__":
    print(F"""{solution() = }""")
| 634 | 0 |
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    """Builds tiny DeiT configs and dummy inputs for the unit tests below.

    Fixes: class name restored from ``DeiTModelTester(self)`` in the test class's
    ``setUp``; every ``self.attr = ...`` assignment had collapsed to a bare local;
    the check methods reused one mangled parameter name (SyntaxError) — restored to
    ``(config, pixel_values, labels)`` matching ``prepare_config_and_inputs``.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,  # kept for signature parity; labels use type_sequence_label_size
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as DeiT does not use input_ids,
    inputs_embeds, attention_mask and seq_length.

    Fixes: base classes restored from the test-mixin imports; every method shared the
    mangled name ``__A`` (each definition clobbered the previous) and signatures reused
    one mangled parameter name (SyntaxError) — names restored to the standard common-test
    API; mangled ``get_values(...)`` arguments restored from the auto-mapping imports.
    """

    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            """feature-extraction""": DeiTModel,
            """image-classification""": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    # NOTE(review): the three original flag names were lost in the mangled copy; these
    # are the flags ModelTesterMixin consumes — confirm against git history.
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''DeiT does not use inputs_embeds''')
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    # special case for DeiTForImageClassificationWithTeacher model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
            {'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
            {'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f'''Testing {model_class} with {problem_type['title']}'''):
                    config.problem_type = problem_type['''title''']
                    config.num_labels = problem_type['''num_labels''']

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs['''labels'''] = inputs['''labels'''].unsqueeze(1).repeat(1, problem_type['''num_labels'''])

                    inputs['''labels'''] = inputs['''labels'''].to(problem_type['''dtype'''])

                    # This tests that we do not trigger the warning form PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f'''Something is going wrong in the regression problem: intercepted {w.message}'''
                            )

                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO cats test fixture image (name restored from the
    `prepare_img()` call in the integration test below)."""
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against the released distilled DeiT checkpoint.

    Fixes: both test methods shared the mangled name ``__A`` (one clobbered the
    other); the ``default_image_processor`` property name is restored from its use
    as ``self.default_image_processor``; mangled ``.to(...)`` targets restored to
    ``torch_device``.
    """

    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''')
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = DeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''').to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''pt''').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        r"""A small test to make sure that inference works in half precision without any problem."""
        model = DeiTModel.from_pretrained(
            '''facebook/deit-base-distilled-patch16-224''', torch_dtype=torch.float16, device_map='''auto'''
        )
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''pt''')
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            outputs = model(pixel_values)
| 298 |
def __magic_name__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] ) -> Optional[Any]:
__lowerCamelCase = [1]
for i in range(2 , __lowerCAmelCase ):
factorials.append(factorials[-1] * i )
assert 0 <= k < factorials[-1] * n, "k out of bounds"
__lowerCamelCase = []
__lowerCamelCase = list(range(__lowerCAmelCase ) )
# Find permutation
while factorials:
__lowerCamelCase = factorials.pop()
__lowerCamelCase , __lowerCamelCase = divmod(__lowerCAmelCase , __lowerCAmelCase )
permutation.append(elements[number] )
elements.remove(elements[number] )
permutation.append(elements[0] )
return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
| 298 | 1 |
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    """Tokenize one text line, padding/truncating to ``max_length`` on ``padding_side``.

    Name restored from the calls inside the dataset class below; the mangled version
    reused one parameter name for all arguments (SyntaxError) and passed the wrong
    names through.
    """
    extra_kw = {"""add_prefix_space""": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(""" """) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="""max_length""" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that contain only ``pad_token_id`` (name restored from the
    calls in the collate function below)."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class A(Dataset):
    """Seq2seq line-per-example dataset over ``{type_path}.source`` / ``{type_path}.target``.

    Fixes: base class ``lowercase_`` undefined (restored to the imported torch
    ``Dataset``); ``__init__`` reused one mangled parameter name (SyntaxError);
    tokenizer-type checks restored (T5 alias ``TaTokenizer`` for the EOS append,
    ``RagTokenizer`` for the encoder/generator split); ``get_char_lens``/``collate_fn``
    method names restored (``get_char_lens`` grounded by its call in ``__init__``).
    """

    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + """.source""")
        self.tgt_file = Path(data_dir).joinpath(type_path + """.target""")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, F"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("""\n""")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("""\n""")
        assert source_line, F"empty source line for index {index}"
        assert tgt_line, F"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, TaTokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, """right""")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, """right""")

        source_ids = source_inputs["""input_ids"""].squeeze()
        target_ids = target_inputs["""input_ids"""].squeeze()
        src_mask = source_inputs["""attention_mask"""].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        """Per-line character counts of the given file."""
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch):
        """Stack a list of examples and trim shared padding columns."""
        input_ids = torch.stack([x["""input_ids"""] for x in batch])
        masks = torch.stack([x["""attention_mask"""] for x in batch])
        target_ids = torch.stack([x["""decoder_input_ids"""] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            """input_ids""": source_ids,
            """attention_mask""": source_mask,
            """decoder_input_ids""": y,
        }
        return batch
UpperCamelCase =getLogger(__name__)
def flatten_list(summary_ids: List[List]):
    """Flatten one level of nesting. NOTE(review): no call site is visible; name
    taken from the upstream RAG utils — confirm."""
    return list(itertools.chain.from_iterable(summary_ids))
def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, """git_log.json"""))
def save_json(content, path, indent=4, **json_dump_kwargs):
    """Dump ``content`` as JSON to ``path`` (name restored from the call in
    save_git_info)."""
    with open(path, """w""") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)
def load_json(path):
    """Load and return the JSON content of ``path``."""
    with open(path) as f:
        return json.load(f)
def get_git_info():
    """Return repo id/sha/branch and hostname for experiment logging (name restored
    from the call in save_git_info)."""
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        """repo_id""": str(repo),
        """repo_sha""": str(repo.head.object.hexsha),
        """repo_branch""": str(repo.active_branch),
        """hostname""": str(socket.gethostname()),
    }
    return repo_infos
def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))
def pickle_save(obj, path):
    """Pickle ``obj`` to ``path``."""
    with open(path, """wb""") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower-case, strip punctuation, drop articles and squeeze whitespace
    (SQuAD-style answer normalization; name restored from the calls in the
    EM/F1 helpers below)."""

    def remove_articles(text):
        return re.sub(r"""\b(a|an|the)\b""", """ """, text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
    """Token-level F1 between normalized prediction and ground truth (0 when no
    overlap)."""
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def exact_match_score(prediction, ground_truth):
    """True iff the normalized prediction equals the normalized reference.

    Fixes a SyntaxError (both parameters were named ``a_``); the name
    ``exact_match_score`` is what ``calculate_exact_match`` below calls.
    """
    return normalize_answer(prediction) == normalize_answer(ground_truth)
def snake_case(output_lns, reference_lns):
    """Average exact-match score over paired output/reference lines.

    Fixes a SyntaxError (both parameters were named ``a_``).

    Returns:
        dict ``{"em": fraction_of_exact_matches}`` (0 for empty input).
    """
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def snake_case(model_prefix: str) -> bool:
    """Return True when *model_prefix* designates a RAG model (starts with "rag").

    Bug fix: the body read ``model_prefix`` while the parameter was named ``a_``.
    """
    return model_prefix.startswith("rag")
def snake_case(extra_params, hparams, config):
    """Move each of *extra_params* from *hparams* onto *config* when set.

    Fixes a SyntaxError (all three parameters were named ``a_``).  A param
    with no equivalent on the config is logged and dropped from *hparams*.

    Returns:
        ``(hparams, config)`` with moved params deleted from *hparams*.
    """
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            param = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, param, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 711 |
'''simple docstring'''
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
# Delimiter inserted between sentences when flattening them to characters.
SENTENCE_DELIMITER = ""

# jiwer >= 2.3.0 ships the char-level transforms; older versions need a shim.
if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):

    class SentencesToListOfCharacters(tr.AbstractTransform):
        """Shim for jiwer < 2.3.0: flatten sentences into a flat char list."""

        def __init__(self, sentence_delimiter=" "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s):
            # A single sentence becomes its list of characters.
            return list(s)

        def process_list(self, inp):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                # Insert the delimiter between (not after) sentences.
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars

    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
# Metric metadata strings.  Bug fix: all three were assigned to the SAME name,
# so only the last survived; the decorator below reads _DESCRIPTION,
# _KWARGS_DESCRIPTION and the MetricInfo reads _CITATION.
_CITATION = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
_DESCRIPTION = "\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n"
_KWARGS_DESCRIPTION = "\nComputes CER score of transcribed segments against references.\nArgs:\n    references: list of references for each speech input.\n    predictions: list of transcribtions to score.\n    concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n    (float): the character error rate\n\nExamples:\n\n    >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n    >>> references = [\"this is the reference\", \"there is another one\"]\n    >>> cer = datasets.load_metric(\"cer\")\n    >>> cer_score = cer.compute(predictions=predictions, references=references)\n    >>> print(cer_score)\n    0.34146341463414637\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class A(datasets.Metric):
    """Character Error Rate metric backed by jiwer.

    Bug fixes vs the obfuscated version: both methods had collapsed to one
    name (``datasets.Metric`` requires ``_info``/``_compute``), and the
    transform keyword arguments were being passed the prediction/reference
    strings instead of ``cer_transform``.
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            # jiwer's "wer" over char-level transforms IS the CER.
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]
        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]
        return incorrect / total
| 543 | 0 |
import argparse
import copy
def generate_neighbours(path):
    """Parse an edge-list file into an adjacency dict.

    Each line is ``node_a node_b distance``; edges are stored in both
    directions as ``[neighbour, distance_str]`` pairs.  Bug fix: locals were
    assigned to a throwaway name while the body read the real names; the
    name ``generate_neighbours`` is what ``main`` calls.
    """
    dict_of_neighbours = {}
    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]])
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]])
    return dict_of_neighbours
def generate_first_solution(path, dict_of_neighbours):
    """Greedy nearest-neighbour tour used as the tabu search's seed solution.

    The start node is the first character of the data file.  When a node has
    no unvisited neighbour, the sentinel cost 10000 is added and later
    subtracted when closing the tour.  Bug fix: restored the real local
    names (the obfuscated body read undefined names).

    Returns:
        ``(first_solution, distance_of_first_solution)``.
    """
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000  # sentinel: "no neighbour found yet"
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node
    first_solution.append(end_node)
    # Cost of the closing edge back to the start node.
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    """All unique 2-swap neighbours of *solution*, each with its tour cost appended.

    Interior nodes are pairwise swapped (endpoints stay fixed); each neighbour
    list gets its total distance appended as the last element, and the
    neighbourhood is sorted by that cost ascending.  Bug fix: restored the
    real local names; ``tabu_search`` calls this by name.
    """
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n
            # Total tour length of the swapped solution.
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    """Classic tabu search over 2-swap neighbourhoods.

    Args:
        first_solution: seed tour (from ``generate_first_solution``).
        distance_of_first_solution: its cost.
        dict_of_neighbours: adjacency dict (from ``generate_neighbours``).
        iters: number of iterations to run.
        size: maximum tabu-list length (FIFO eviction).

    Returns:
        ``(best_solution_ever, best_cost)``.

    Bug fix: restored the real local names — the obfuscated body read
    names that were never assigned.
    """
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1
        found = False
        while not found:
            # Locate the first position where the candidate differs.
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]  # strip the appended cost
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                # Move is tabu: try the next-best neighbour.
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
            if len(tabu_list) >= size:
                tabu_list.pop(0)
        count = count + 1
    return best_solution_ever, best_cost
def main(args=None):
    """Run the full pipeline on parsed CLI *args* (File, Iterations, Size).

    Bug fix: the obfuscated version bound the argument to a throwaway name
    while the body read ``args``; restored the name ``main`` used by the
    ``__main__`` guard below.
    """
    dict_of_neighbours = generate_neighbours(args.File)
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours)
    best_sol, best_cost = tabu_search(
        first_solution, distance_of_first_solution, dict_of_neighbours,
        args.Iterations, args.Size, )
    print(f"""Best solution: {best_sol}, with total distance: {best_cost}.""")
if __name__ == "__main__":
    # Bug fix: the parser was assigned to a throwaway name while the
    # add_argument calls read `parser`.
    parser = argparse.ArgumentParser(description='''Tabu Search''')
    parser.add_argument(
        '''-f''',
        '''--File''',
        type=str,
        help='''Path to the file containing the data''',
        required=True,
    )
    parser.add_argument(
        '''-i''',
        '''--Iterations''',
        type=int,
        help='''How many iterations the algorithm should perform''',
        required=True,
    )
    parser.add_argument(
        '''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True
    )
    # Pass the arguments to main method
    main(parser.parse_args())
| 84 |
import math
def prime_sieve(n):
    """Return all primes strictly below *n* using an odd-only Eratosthenes sieve.

    Requires ``n >= 3`` (the sieve indexes positions 0..2 unconditionally).
    Bug fix: restored the real local names (``is_prime``/``index``/``primes``)
    and the name ``prime_sieve`` that ``solution`` below calls.
    """
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    # Only odd multiples need striking; evens (except 2) are skipped entirely.
    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
def solution(limit=999966663333):
    """Sum numbers below *limit* divisible by exactly one of lps/ups.

    Appears to implement Project Euler's "semidivisible numbers" problem
    (lps = largest prime <= sqrt(n), ups = smallest prime >= sqrt(n)) —
    TODO confirm against the problem statement.  Bug fix: restored the real
    local names; the obfuscated body read names that were never assigned.
    """
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum
if __name__ == "__main__":
print(solution())
| 84 | 1 |
from __future__ import annotations
def __lowercase(nums):
    """Return the arithmetic mean of *nums*.

    Raises:
        ValueError: if *nums* is empty.

    Bug fix: the body read ``nums`` while the parameter was named differently.
    """
    if not nums:
        raise ValueError('''List is empty''')
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 180 |
from typing import List
from .keymap import KEYMAP, get_character
def __lowercase(key):
    """Decorator: append *key* to the decorated function's ``handle_key`` list.

    Bug fix: the inner closure read undefined names (``handle``/``key``);
    restored the standard register-a-key-handler pattern.
    """

    def decorator(func):
        handle = getattr(func, '''handle_key''', [])
        handle += [key]
        setattr(func, '''handle_key''', handle)
        return func

    return decorator
def __lowercase(*keys):
    """Decorator: append every key in *keys* to the function's ``handle_key`` list.

    Variadic sibling of the single-key decorator above.  Bug fix: the inner
    closure read undefined names.
    """

    def decorator(func):
        handle = getattr(func, '''handle_key''', [])
        handle += keys
        setattr(func, '''handle_key''', handle)
        return func

    return decorator
class KeyHandler(type):
    """Metaclass that collects methods tagged with a ``handle_key`` list into a
    per-class ``key_handler`` dict and attaches a ``handle_input`` dispatcher.

    Bug fixes: ``__new__`` had three identically-named parameters (a
    SyntaxError), and locals were assigned to throwaway names.  The class is
    referenced by name as ``KeyHandler`` inside this file, and the base is
    ``type`` since it is used as ``KeyHandler(name, bases, attrs)``.
    """

    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, '''key_handler'''):
            setattr(new_cls, '''key_handler''', {})
        setattr(new_cls, '''handle_input''', KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, '''handle_key''', [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Read one key press and dispatch to the registered handler, if any."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            # NOTE(review): presumably records the last pressed key — confirm
            # against the upstream implementation.
            cls.current_selection = char
            return handler(cls)
        else:
            return None
def __lowercase ( cls ):
    """Rebuild *cls* through the ``KeyHandler`` metaclass so that any methods
    tagged with ``handle_key`` are registered as key handlers on the class."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy() )
| 180 | 1 |
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    # Bug fix: every statement was bound to the SAME throwaway name while
    # later statements read `query`, `res` and `link`.
    # Use CLI args as the query if given, otherwise prompt interactively.
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
    print("Googling.....")
    url = f'https://www.google.com/search?q={query}&num=100'
    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        # Fallback page layout: the link is wrapped in a redirect URL.
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]
    webbrowser.open(link)
| 10 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class _A(__lowercase):
    """Image-captioning tool built on BLIP (generates an English description).

    Bug fixes: all class attributes had collapsed to one name (so only the
    last assignment survived) and all three pipeline methods shared one name;
    restored the attribute/method names the ``PipelineTool`` base class reads
    (``encode``/``forward``/``decode``) — confirm against the base class.
    ``__init__`` also had ``*a, **a`` duplicate parameters (a SyntaxError).
    """

    default_checkpoint = """Salesforce/blip-image-captioning-base"""
    description = (
        """This is a tool that generates a description of an image. It takes an input named `image` which should be the """
        """image to caption, and returns a text that contains the description in English."""
    )
    name = """image_captioner"""
    model_class = AutoModelForVisionaSeq
    inputs = ["""image"""]
    outputs = ["""text"""]

    def __init__(self, *args, **kwargs):
        # Fail fast if the vision extras (PIL etc.) are missing.
        requires_backends(self, ["""vision"""])
        super().__init__(*args, **kwargs)

    def encode(self, image):
        return self.pre_processor(images=image, return_tensors="""pt""")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
def fibonacci(n) -> int:
    """Return the n-th Fibonacci number (1-indexed: fib(2) == 1).

    Non-int or n == 1 returns 0, preserving the original's guard.  Bug fix:
    the sequence list was assigned to a throwaway name while the loop read
    ``sequence``; the name ``fibonacci`` is what the digit-index helper calls.
    """
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]
def fibonacci_digits_index(n) -> int:
    """Index of the first Fibonacci number with at least *n* decimal digits.

    Bug fix: restored the real local names (``digits``/``index``) and the
    function name that ``solution`` below calls.
    """
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index
def solution(n=1000) -> int:
    """Project Euler 25: index of the first Fibonacci number with *n* digits.

    Restored the name ``solution`` used by the ``__main__`` guard below.
    """
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip()))) | 307 |
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    """Surface area of a cube. Raises ValueError for negative input."""
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    """Surface area of a cuboid. Raises ValueError for negative input.

    Bug fix: all parameters shared one name (a SyntaxError); restored the
    names the demo at the bottom of this file relies on.
    """
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    """Surface area of a sphere (4*pi*r^2)."""
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    """Total surface area of a hemisphere, curved plus flat face (3*pi*r^2)."""
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    """Total surface area of a cone: pi*r*(r + slant_height)."""
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    """Total surface area of a conical frustum.

    Bug fixes: duplicate parameter names (a SyntaxError) and the slant
    height was assigned to a throwaway name while the return read it.
    """
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            "surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    """Total surface area of a cylinder: 2*pi*r*(h + r)."""
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    """Surface area of a (ring) torus: 4*pi^2*R*r.

    Spindle/self-intersecting tori (R < r) are rejected.
    """
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError(
            "surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    """Area of a rectangle."""
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width
def area_square(side_length: float) -> float:
    """Area of a square."""
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    """Area of a triangle from base and height."""
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side_1: float, side_2: float, side_3: float) -> float:
    """Area of a triangle from its three sides via Heron's formula.

    Bug fixes: duplicate parameter names (a SyntaxError) made the triangle
    inequality checks meaningless, and the semi-perimeter was assigned to a
    throwaway name while the formula read it.
    """
    if side_1 < 0 or side_2 < 0 or side_3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side_1 + side_2 + side_3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_1)
        * (semi_perimeter - side_2)
        * (semi_perimeter - side_3))
    return area


def area_parallelogram(base: float, height: float) -> float:
    """Area of a parallelogram."""
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height
def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    """Area of a trapezium: (b1 + b2) * h / 2.

    Bug fix: duplicate parameter names (a SyntaxError).
    """
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    """Area of a circle: pi*r^2."""
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    """Area of an ellipse: pi*a*b."""
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    """Area of a rhombus from its diagonals: d1*d2/2."""
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    """Area of a regular polygon with *sides* sides of the given *length*.

    Bug fix: removed a duplicated, unreachable second return statement.
    """
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            '''area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides''' )
    elif length < 0:
        raise ValueError(
            '''area_reg_polygon() only accepts non-negative values as \
length of a side''' )
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(F'''Rectangle: {area_rectangle(10, 20) = }''')
print(F'''Square: {area_square(10) = }''')
print(F'''Triangle: {area_triangle(10, 10) = }''')
print(F'''Triangle: {area_triangle_three_sides(5, 12, 13) = }''')
print(F'''Parallelogram: {area_parallelogram(10, 20) = }''')
print(F'''Rhombus: {area_rhombus(10, 20) = }''')
print(F'''Trapezium: {area_trapezium(10, 20, 30) = }''')
print(F'''Circle: {area_circle(20) = }''')
print(F'''Ellipse: {area_ellipse(10, 20) = }''')
print('\nSurface Areas of various geometric shapes: \n')
print(F'''Cube: {surface_area_cube(20) = }''')
print(F'''Cuboid: {surface_area_cuboid(10, 20, 30) = }''')
print(F'''Sphere: {surface_area_sphere(20) = }''')
print(F'''Hemisphere: {surface_area_hemisphere(20) = }''')
print(F'''Cone: {surface_area_cone(10, 20) = }''')
print(F'''Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }''')
print(F'''Cylinder: {surface_area_cylinder(10, 20) = }''')
print(F'''Torus: {surface_area_torus(20, 10) = }''')
print(F'''Equilateral Triangle: {area_reg_polygon(3, 10) = }''')
print(F'''Square: {area_reg_polygon(4, 10) = }''')
print(F'''Reqular Pentagon: {area_reg_polygon(5, 10) = }''') | 307 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_A : Any = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    """Build (old_key, new_key) pairs mapping DiT checkpoint names to BEiT names.

    Bug fixes: duplicate parameter names (a SyntaxError) and locals assigned
    to throwaway names; restored the name used by ``convert_dit_checkpoint``.

    Args:
        config: model config; only ``num_hidden_layers`` is read here.
        has_lm_head: include mask-token/layernorm keys instead of the
            classification head.
        is_semantic: prefix original keys with ``backbone.``.
    """
    prefix = "backbone." if is_semantic else ""
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias"))
    # projection layer + position embeddings
    rename_keys.extend(
        [
            (f"{prefix}cls_token", "beit.embeddings.cls_token"),
            (f"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
            (f"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
            (f"{prefix}pos_embed", "beit.embeddings.position_embeddings"),
        ])
    if has_lm_head:
        # mask token + layernorm
        rename_keys.extend(
            [
                ("mask_token", "beit.embeddings.mask_token"),
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ])
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("fc_norm.weight", "beit.pooler.layernorm.weight"),
                ("fc_norm.bias", "beit.pooler.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ])
    return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    """Split each fused qkv projection in *state_dict* into q/k/v BEiT keys, in place.

    Also moves gamma_1/gamma_2 to lambda_1/lambda_2 (renamed because
    ``from_pretrained`` would otherwise rewrite "gamma").  Bug fixes:
    duplicate parameter names (a SyntaxError) and target keys assigned to
    throwaway names; restored the name used by ``convert_dit_checkpoint``.
    """
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")
        # Fused weight layout is [q; k; v] stacked along dim 0.
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias
        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_a = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_b = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_a
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_b
def rename_key(dct, old, new):
    """Move the value under *old* to *new* in *dct*, in place.

    Bug fix: all three parameters shared one name (a SyntaxError); restored
    the name used by ``convert_dit_checkpoint``.
    """
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Download and return the standard COCO cats test image (PIL Image).

    Bug fix: result bound to a throwaway name while the return read ``im``;
    restored ``stream=True`` and the name used by ``convert_dit_checkpoint``.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """Convert a DiT checkpoint at *checkpoint_url* into HuggingFace BEiT format.

    Downloads the original state dict, renames/splits its weights, verifies
    the converted model's logits shape on a test image, saves the model and
    image processor, and optionally pushes them to the Hub.  Bug fixes:
    duplicate parameter names (a SyntaxError) and locals bound to throwaway
    names while later statements read the real names.
    """
    # rvlcdip checkpoints are fine-tuned classifiers; the rest are MIM heads.
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)
    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)
    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False)
    image = prepare_img()
    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    logits = outputs.logits
    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add image processor", use_temp_dir=True, )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add model", use_temp_dir=True, )
if __name__ == "__main__":
    # Bug fix: the parser and the parsed args were bound to throwaway names
    # while later statements read `parser` and `args`.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """--checkpoint_url""",
        default="""https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth""",
        type=str,
        help="""URL to the original PyTorch checkpoint (.pth file).""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
    )
    parser.add_argument(
        """--push_to_hub""",
        action="""store_true""",
    )
    args = parser.parse_args()
    convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
'''simple docstring'''
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
    # Fix: the model and all intermediates were assigned to throwaway names
    # (`_lowercase`) while later lines referenced `classifier`, `train_datagen`,
    # etc.; `ConvaD`/`MaxPoolingaD` were garbled `Conv2D`/`MaxPooling2D`.

    # Initialising the CNN (Sequential - building the model layer by layer).
    classifier = models.Sequential()

    # Step 1 - Convolution.
    # 64x64 is the length & breadth of dataset images and 3 is the RGB channel;
    # (3, 3) is the kernel size (filter matrix).
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
    )

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))

    # Compiling the CNN
    classifier.compile(
        optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
    )

    # Part 2 - Fitting the CNN to the images
    # Load Trained model weights
    # from keras.models import load_model
    # regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    classifier.fit_generator(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )
    classifier.save("cnn.h5")

    # Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
# Module-level logger, named after the module per library convention.
logger = logging.get_logger(__name__)
# Fix: all ten constants below were assigned to the same obfuscated name
# (`UpperCAmelCase_`), so each overwrote the previous one and the tokenizer
# classes that reference VOCAB_FILES_NAMES etc. could not resolve them.

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

# Map pretrained model identifiers to the URLs of their vocab/tokenizer files.
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum input sizes (positional embedding length) per pretrained checkpoint.
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}

# Default tokenizer init kwargs per pretrained checkpoint.
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    r"""
    Construct a DPR context-encoder tokenizer (a BERT WordPiece tokenizer with
    DPR-specific pretrained file maps and defaults).
    """

    # Fix: the obfuscated version gave this class the same name as the next
    # one, an undefined base (`_lowerCamelCase`), and four attributes all
    # named `lowercase` that overwrote each other.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer(BertTokenizer):
    r"""
    Construct a DPR question-encoder tokenizer (a BERT WordPiece tokenizer with
    DPR-specific pretrained file maps and defaults).
    """

    # Fix: duplicate class name, undefined base, and colliding `lowercase`
    # attribute names in the obfuscated version.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
# Output records for the reader: one predicted answer span, and the raw reader
# logits. Fix: both were assigned to the overwritten name `UpperCAmelCase_`
# although the mixin below constructs `DPRSpanPrediction` instances.
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
# Shared docstring injected into the reader tokenizer classes below.
# Fix: was assigned to the collision-prone name `UpperCAmelCase_` although the
# decorators below need to reference it.
CUSTOM_DPR_READER_DOCSTRING = R'''
    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
    with the format:
    ```
    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
    ```
    Args:
        questions (`str` or `List[str]`):
            The questions to be encoded. You can specify one question for many passages. In this case, the question
            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
            `titles` or `texts`.
        titles (`str` or `List[str]`):
            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
        texts (`str` or `List[str]`):
            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
            Activates and controls padding. Accepts the following values:
            - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
            if provided).
            - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
            acceptable input length for the model if that argument is not provided.
            - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different
            lengths).
        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
            Activates and controls truncation. Accepts the following values:
            - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to
            the maximum acceptable input length for the model if that argument is not provided. This will truncate
            token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
            of pairs) is provided.
            - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
            acceptable input length for the model if that argument is not provided. This will only truncate the first
            sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
            acceptable input length for the model if that argument is not provided. This will only truncate the
            second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths
            greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
            Controls the maximum length to use by one of the truncation/padding parameters.
            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
            is required by one of the truncation/padding parameters. If the model has no specific maximum input
            length (like XLNet) truncation/padding to a maximum length will be deactivated.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
            If set, will return tensors instead of list of python integers. Acceptable values are:
            - `\'tf\'`: Return TensorFlow `tf.constant` objects.
            - `\'pt\'`: Return PyTorch `torch.Tensor` objects.
            - `\'np\'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer\'s default, defined by the `return_outputs` attribute.
            [What are attention masks?](../glossary#attention-mask)
    Returns:
        `Dict[str, List[List[int]]]`: A dictionary with the following keys:
        - `input_ids`: List of token ids to be fed to a model.
        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
    '''
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    """
    Mixin adding DPR-reader encoding (`__call__`) and answer-span decoding
    (`decode_best_spans`) on top of a BERT-style tokenizer.

    Fixes over the obfuscated version: the two helper methods shared the name
    `UpperCamelCase_` (one clobbered the other), `self._get_best_spans` was
    called but never defined, every parameter was named `_A`, and the sort key
    lambda referenced an undefined variable `x`.
    """

    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        # Plain question-only (or single text-pair) encoding delegates to the
        # base tokenizer unchanged.
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        # One question may be broadcast to all passages.
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"""There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."""
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                # 1 for real tokens, 0 for padding.
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: "DPRReaderOutput",
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> "List[DPRSpanPrediction]":
        """
        Extract the best answer spans from the reader's logits, ordered by
        passage relevance, and decode them back to text.
        """
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        # Most relevant passages first.
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # Assuming question & title information is at the beginning of the
            # sequence: skip past the second [SEP].
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits,
        end_logits,
        max_answer_length: int,
        top_spans: int,
    ):
        """
        Score every (start, end) span of length <= max_answer_length and return
        up to `top_spans` non-overlapping spans with the highest
        start+end logit sum.
        """
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        # Fix: original lambda was `lambda _A : x[1]` with `x` undefined.
        scores = sorted(scores, key=lambda score: score[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"""Wrong span indices: [{start_index}:{end_index}]""")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"""Span is too long: {length} > {max_answer_length}""")
            # Skip spans overlapping an already-chosen one.
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    r"""
    Construct a DPR reader tokenizer: a BERT WordPiece tokenizer combined with
    the reader-specific encoding/decoding mixin above.
    """

    # Fix: undefined bases (`_lowerCamelCase`) and class attributes all named
    # `lowercase` in the obfuscated version.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy-import structure: maps submodule name -> public names it exports.
# Fix: the backend-specific export lists were assigned to throwaway names and
# `_import_structure` (referenced at the bottom) was never built; the
# `_LazyModule` was also discarded instead of installed via `sys.modules`.
_import_structure = {
    "configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"],
    "tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # PyTorch objects are only exported when torch is installed.
    _import_structure["modeling_transfo_xl"] = [
        "TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AdaptiveEmbedding",
        "TransfoXLForSequenceClassification",
        "TransfoXLLMHeadModel",
        "TransfoXLModel",
        "TransfoXLPreTrainedModel",
        "load_tf_weights_in_transfo_xl",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # TensorFlow objects are only exported when TF is installed.
    _import_structure["modeling_tf_transfo_xl"] = [
        "TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFAdaptiveEmbedding",
        "TFTransfoXLForSequenceClassification",
        "TFTransfoXLLMHeadModel",
        "TFTransfoXLMainLayer",
        "TFTransfoXLModel",
        "TFTransfoXLPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
    from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_transfo_xl import (
            TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            AdaptiveEmbedding,
            TransfoXLForSequenceClassification,
            TransfoXLLMHeadModel,
            TransfoXLModel,
            TransfoXLPreTrainedModel,
            load_tf_weights_in_transfo_xl,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_transfo_xl import (
            TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFAdaptiveEmbedding,
            TFTransfoXLForSequenceClassification,
            TFTransfoXLLMHeadModel,
            TFTransfoXLMainLayer,
            TFTransfoXLModel,
            TFTransfoXLPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from __future__ import annotations
class BoyerMooreSearch:
    """
    Boyer-Moore substring search using only the bad-character heuristic.

    Fixes over the obfuscated version: `__init__` stored the arguments in
    local throwaway names instead of `self.text`/`self.pattern`/`self.textLen`/
    `self.patLen`, all three methods shared one name (`_UpperCamelCase`), and
    the class name did not match the `BoyerMooreSearch` constructor call below.
    """

    def __init__(self, text: str, pattern: str) -> None:
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the rightmost index of `char` in the pattern, or -1."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """
        Scan the window at `current_pos` right-to-left and return the text
        index of the first mismatching character, or -1 if the window matches.
        """
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        """Return every index where the pattern occurs in the text."""
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                # Shift suggested by the heuristic (computed but unused, as in
                # the original).  # lgtm [py/multiple-definition]
                _shift = mismatch_index - match_index
        return positions
# Demo driver. Fix: the four values were assigned to the same throwaway name
# (`lowerCamelCase_`) while the code referenced `text`, `pattern`, `bms`,
# and `positions`.
text = "ABAABA"
pattern = "AB"

bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
"""simple docstring"""
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """
    Tokenizer tests for CLIP (slow and fast variants).

    Fixes over the obfuscated version: every method was named `_UpperCamelCase`
    (only the last survived and unittest never discovered any `test_*` method),
    the class attributes all collided on one name, the mixin base was the
    undefined name `A`, and locals were discarded into throwaway names.
    """

    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark)
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

    def test_offsets_mapping(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

    def test_log_warning(self):
        # Loading an old-format CLIP tokenizer file with the fast class must
        # fail with an explanatory message.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")
        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."
            )
        )

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower-cases; the generic mixin test does not apply.
        pass
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("socket.socket" )
@patch("builtins.open" )
def __magic_name__ ( A : List[str], A : Tuple ):
'''simple docstring'''
a = Mock()
a = conn, Mock()
a = iter([1, None] )
a = lambda A : next(A )
# ===== invoke =====
send_file(filename="mytext.txt", testing=A )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)

# Type variable constrained to the two dataset flavors; used so the combine
# functions below return the same flavor they were given.
DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """
    Interleave several datasets (all map-style or all iterable) into one.

    Fixes over the obfuscated version: every parameter was named `A`
    (duplicate argument names are a SyntaxError) and the body's references to
    the individual arguments were lost.

    Args:
        datasets: The datasets to interleave; must all be the same flavor.
        probabilities: Optional sampling probabilities, one per dataset.
        seed: Optional RNG seed for probabilistic sampling.
        info: Optional `DatasetInfo` for the result.
        split: Optional `NamedSplit` for the result.
        stopping_strategy: "first_exhausted" stops when any source runs out;
            "all_exhausted" oversamples until every source has been seen fully.

    Raises:
        ValueError: on an empty list, mixed/unknown dataset types, or an
            invalid stopping strategy.
    """
    # Imported lazily to avoid circular imports at module load time.
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            # The first dataset fixes the expected flavor for the rest.
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """
    Concatenate several datasets (all map-style or all iterable) into one.

    Fixes over the obfuscated version: every parameter was named `A`
    (duplicate argument names are a SyntaxError) and the body's references to
    the individual arguments were lost.

    Args:
        dsets: The datasets to concatenate; must all be the same flavor.
        info: Optional `DatasetInfo` for the result.
        split: Optional `NamedSplit` for the result.
        axis: 0 to stack rows, 1 to join columns.

    Raises:
        ValueError: on an empty list or mixed/unknown dataset types.
    """
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            # The first dataset fixes the expected flavor for the rest.
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
def lowercase( x_points , y_points , xa ) -> list:
    """Interpolate ``y`` at abscissa ``xa`` using Neville's iterated scheme.

    Args:
        x_points: x-coordinates of the data points.
        y_points: y-coordinates of the data points (same length).
        xa: the point at which to evaluate the interpolating polynomial.

    Returns:
        ``[value, q]`` where ``value`` is the interpolated result and ``q`` is
        the full Neville tableau used to compute it.

    >>> lowercase([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)[0]
    10.0
    """
    # Fixed from obfuscated original: all three parameters shared one name
    # (a SyntaxError) and the tableau assignment targets were lost.
    n = len(x_points )
    q = [[0] * n for i in range(n )]
    # Column 1 of the tableau holds the raw y values.
    for i in range(n ):
        q[i][1] = y_points[i]
    # Each further column combines two lower-order interpolants.
    for i in range(2 , n ):
        for j in range(i , n ):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
# Run this module's doctests when the file is executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 537 | import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
_SCREAMING_SNAKE_CASE = get_tests_dir("""fixtures""")
_SCREAMING_SNAKE_CASE = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""")
_SCREAMING_SNAKE_CASE = get_tests_dir("""fixtures/dummy-config.json""")
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    """Tests for ``AutoFeatureExtractor``: hub lookup, local loading, error
    messages, custom registration, and trust_remote_code behaviour.

    NOTE(review): the bodies below reference ``lowerCamelCase_`` as a *value*
    and assign to a bare ``UpperCamelCase`` name, losing the original
    identifiers — this looks like obfuscation damage and would raise
    ``NameError`` at runtime; confirm against the upstream transformers test.
    """

    def lowerCamelCase_ ( self : Tuple ):
        """Reset a counter before each test (assignment target appears lost)."""
        UpperCamelCase = 0

    def lowerCamelCase_ ( self : int ):
        """Load a feature extractor by hub repo id and check its type."""
        UpperCamelCase = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" )
        self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )

    def lowerCamelCase_ ( self : Optional[Any] ):
        """Load a feature extractor from a local config fixture."""
        UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ )
        self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )

    def lowerCamelCase_ ( self : Tuple ):
        """Round-trip: a model config alone must suffice to reload the extractor."""
        with tempfile.TemporaryDirectory() as tmpdirname:
            UpperCamelCase = WavaVecaConfig()
            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ).to_dict()
            config_dict.pop("""feature_extractor_type""" )
            UpperCamelCase = WavaVecaFeatureExtractor(**lowerCamelCase_ )
            # save in new folder
            model_config.save_pretrained(lowerCamelCase_ )
            config.save_pretrained(lowerCamelCase_ )
            UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ )
            # make sure private variable is not incorrectly saved
            UpperCamelCase = json.loads(config.to_json_string() )
            self.assertTrue("""_processor_class""" not in dict_as_saved )
        self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )

    def lowerCamelCase_ ( self : List[str] ):
        """Load directly from a feature-extractor config fixture file."""
        UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ )
        self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )

    def lowerCamelCase_ ( self : int ):
        """An invalid repo id must raise with a helpful message."""
        with self.assertRaisesRegex(
            lowerCamelCase_ , """bert-base is not a local folder and is not a valid model identifier""" ):
            UpperCamelCase = AutoFeatureExtractor.from_pretrained("""bert-base""" )

    def lowerCamelCase_ ( self : str ):
        """An invalid git revision must raise with a helpful message."""
        with self.assertRaisesRegex(
            lowerCamelCase_ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
            UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ , revision="""aaaaaa""" )

    def lowerCamelCase_ ( self : Any ):
        """A repo without preprocessor_config.json must raise clearly."""
        with self.assertRaisesRegex(
            lowerCamelCase_ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
            UpperCamelCase = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" )

    def lowerCamelCase_ ( self : List[Any] ):
        """Remote (dynamic) feature extractors require trust_remote_code, and
        survive a save/reload round-trip."""
        with self.assertRaises(lowerCamelCase_ ):
            UpperCamelCase = AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""" )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(lowerCamelCase_ ):
            UpperCamelCase = AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ )
        UpperCamelCase = AutoFeatureExtractor.from_pretrained(
            """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ )
        self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(lowerCamelCase_ )
            UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ , trust_remote_code=lowerCamelCase_ )
        self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )

    def lowerCamelCase_ ( self : Union[str, Any] ):
        """Register a custom config/extractor pair; cleanup in finally so other
        tests are unaffected even on failure."""
        try:
            AutoConfig.register("""custom""" , lowerCamelCase_ )
            AutoFeatureExtractor.register(lowerCamelCase_ , lowerCamelCase_ )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(lowerCamelCase_ ):
                AutoFeatureExtractor.register(lowerCamelCase_ , lowerCamelCase_ )
            # Now that the config is registered, it can be used as any other config with the auto-API
            UpperCamelCase = CustomFeatureExtractor.from_pretrained(lowerCamelCase_ )
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(lowerCamelCase_ )
                UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ )
                self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

    def lowerCamelCase_ ( self : Dict ):
        """A locally registered extractor wins over remote code unless
        trust_remote_code=True is passed explicitly."""

        class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ):
            # Marker attribute distinguishing the local implementation.
            __lowerCAmelCase = True

        try:
            AutoConfig.register("""custom""" , lowerCamelCase_ )
            AutoFeatureExtractor.register(lowerCamelCase_ , lowerCamelCase_ )
            # If remote code is not set, the default is to use local
            UpperCamelCase = AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""" )
            self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
            self.assertTrue(feature_extractor.is_local )
            # If remote code is disabled, we load the local one.
            UpperCamelCase = AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ )
            self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
            self.assertTrue(feature_extractor.is_local )
            # If remote is enabled, we load from the Hub
            UpperCamelCase = AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ )
            self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
            self.assertTrue(not hasattr(lowerCamelCase_ , """is_local""" ) )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 537 | 1 |
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
# Optional dependencies: import torch / PIL only when available so the module
# stays importable in minimal environments.
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image
else:
    # Minimal stand-in so module-level references to ``Image`` still resolve
    # when Pillow is not installed.
    class _a :
        """Stub replacing ``PIL.Image`` when vision dependencies are missing."""

        @staticmethod
        def __UpperCAmelCase( *__UpperCAmelCase , **__UpperCAmelCase ):
            # NOTE(review): ``*`` and ``**`` share one parameter name here — a
            # SyntaxError; upstream this is ``Image.open(*args, **kwargs)``.
            # Confirm against the original transformers test file.
            pass
def lowerCamelCase_ ( image ) -> str:
    """Return the MD5 hex digest of an image's raw pixel bytes.

    Used below to fingerprint pipeline output images for comparison.

    Args:
        image: any object exposing ``tobytes()`` (e.g. a ``PIL.Image.Image``).

    Returns:
        The 32-character lowercase hexadecimal MD5 digest.
    """
    # Fixed from obfuscated original: ``hashlib.mda`` (no such function) is
    # ``hashlib.md5``, and the digest object / parameter names were lost.
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _a ( unittest.TestCase ):
    """Tests for ``DepthEstimationPipeline`` output shape and values.

    NOTE(review): several signatures below declare multiple parameters all
    named ``__UpperCAmelCase`` — a SyntaxError in Python and apparent
    obfuscation damage; confirm against the upstream transformers test file.
    """

    # Mapping of model classes eligible for this pipeline task.
    lowerCamelCase_ : Optional[Any] = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def __UpperCAmelCase( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
        """Build a pipeline plus sample image paths for the common test-suite."""
        __A : List[str] = DepthEstimationPipeline(model=__UpperCAmelCase , image_processor=__UpperCAmelCase )
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def __UpperCAmelCase( self , __UpperCAmelCase , __UpperCAmelCase ):
        """Run the pipeline on single and batched inputs of several image kinds
        (path, URL, RGBA/LA/L files) and check the output structure."""
        __A : Union[str, Any] = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        self.assertEqual({"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )} , __UpperCAmelCase )
        import datasets

        __A : Optional[int] = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
        __A : str = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ] )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
                {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
                {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
                {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
                {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
            ] , __UpperCAmelCase , )

    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF" )
    def __UpperCAmelCase( self ):
        """Placeholder: no TF implementation exists for this task."""
        pass

    @slow
    @require_torch
    def __UpperCAmelCase( self ):
        """Integration check against Intel/dpt-large with pinned depth stats."""
        __A : Any = "Intel/dpt-large"
        __A : Optional[int] = pipeline("depth-estimation" , model=__UpperCAmelCase )
        __A : Dict = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg" )
        __A : Union[str, Any] = hashimage(outputs["depth"] )
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item() ) , 29.3_04 )
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item() ) , 2.6_62 )

    @require_torch
    def __UpperCAmelCase( self ):
        # This is highly irregular to have no small tests.
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT" )
| 387 | from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
UpperCamelCase = TypeVar('T')
class _a ( Generic[T] ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ):
__A : Any | T = None
__A : int = len(__UpperCAmelCase )
__A : list[T] = [any_type for _ in range(self.N )] + arr
__A : Any = fnc
self.build()
def __UpperCAmelCase( self ):
for p in range(self.N - 1 , 0 , -1 ):
__A : Dict = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def __UpperCAmelCase( self , __UpperCAmelCase , __UpperCAmelCase ):
p += self.N
__A : List[Any] = v
while p > 1:
__A : Optional[Any] = p // 2
__A : str = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def __UpperCAmelCase( self , __UpperCAmelCase , __UpperCAmelCase ): # noqa: E741
__A , __A : Dict = l + self.N, r + self.N
__A : T | None = None
while l <= r:
if l % 2 == 1:
__A : Union[str, Any] = self.st[l] if res is None else self.fn(__UpperCAmelCase , self.st[l] )
if r % 2 == 0:
__A : Optional[Any] = self.st[r] if res is None else self.fn(__UpperCAmelCase , self.st[r] )
__A , __A : str = (l + 1) // 2, (r - 1) // 2
return res
# Self-test harness: compare every possible range query against functools.reduce,
# both before and after applying a batch of point updates.
#
# NOTE(review): the obfuscation collapsed every binding here to ``UpperCamelCase``
# and every local to ``_lowercase`` / ``__A`` — the code below refers to
# ``SegmentTree``, ``test_array``, ``test_updates``, ``min_segment_tree`` etc.,
# none of which are defined under those names, and the lambda declares two
# parameters with one name (SyntaxError). Confirm against the upstream script.
if __name__ == "__main__":
    from functools import reduce

    UpperCamelCase = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    UpperCamelCase = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    UpperCamelCase = SegmentTree(test_array, min)
    UpperCamelCase = SegmentTree(test_array, max)
    UpperCamelCase = SegmentTree(test_array, lambda a, b: a + b)

    def lowerCamelCase_ ( ) -> None:
        """Check min/max/sum trees against reduce on every (i, j) range."""
        for i in range(len(_lowercase ) ):
            for j in range(_lowercase , len(_lowercase ) ):
                __A : Dict = reduce(_lowercase , test_array[i : j + 1] )
                __A : int = reduce(_lowercase , test_array[i : j + 1] )
                __A : Dict = reduce(lambda _lowercase , _lowercase : a + b , test_array[i : j + 1] )
                assert min_range == min_segment_tree.query(_lowercase , _lowercase )
                assert max_range == max_segment_tree.query(_lowercase , _lowercase )
                assert sum_range == sum_segment_tree.query(_lowercase , _lowercase )

    test_all_segments()

    for index, value in test_updates.items():
        UpperCamelCase = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
| 387 | 1 |
"""simple docstring"""
class __UpperCAmelCase:
    """Undirected weighted graph stored as a nested adjacency dict
    (``self.adjacency[tail][head] -> weight``).

    NOTE(review): bodies reference ``_snake_case`` (never defined) and several
    assignments lost their ``self.adjacency[...]`` targets to obfuscation;
    a later static method also refers to this class as ``Graph``. Confirm
    against the upstream source before relying on behaviour described here.
    """

    def __init__( self ):
        """Start with an empty graph: no vertices, no edges."""
        lowercase__ : Dict= 0
        lowercase__ : Optional[int]= 0
        lowercase__ : Any= {}

    def UpperCAmelCase_ ( self , snake_case__ ):
        """Add a vertex if not already present."""
        if vertex not in self.adjacency:
            lowercase__ : Tuple= {}
            self.num_vertices += 1

    def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ ):
        """Add an undirected weighted edge (self-loops are ignored)."""
        self.add_vertex(_snake_case )
        self.add_vertex(_snake_case )
        if head == tail:
            return
        lowercase__ : Optional[int]= weight
        lowercase__ : List[Any]= weight

    def UpperCAmelCase_ ( self ):
        """Re-weight edges so that weights are strictly increasing after a
        sort — ensures uniqueness of the minimum spanning forest."""
        lowercase__ : int= self.get_edges()
        for edge in edges:
            lowercase__, lowercase__, lowercase__ : str= edge
            edges.remove((tail, head, weight) )
        for i in range(len(_snake_case ) ):
            lowercase__ : Dict= list(edges[i] )
        edges.sort(key=lambda snake_case__ : e[2] )
        for i in range(len(_snake_case ) - 1 ):
            if edges[i][2] >= edges[i + 1][2]:
                lowercase__ : int= edges[i][2] + 1
        for edge in edges:
            lowercase__, lowercase__, lowercase__ : List[str]= edge
            lowercase__ : str= weight
            lowercase__ : List[Any]= weight

    def __str__( self ):
        """Render one ``head -> tail == weight`` line per stored edge."""
        lowercase__ : Dict= ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                lowercase__ : Optional[int]= self.adjacency[head][tail]
                string += F'''{head} -> {tail} == {weight}\n'''
        return string.rstrip("\n" )

    def UpperCAmelCase_ ( self ):
        """Return all edges as (tail, head, weight) tuples (both directions)."""
        lowercase__ : Optional[int]= []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]) )
        return output

    def UpperCAmelCase_ ( self ):
        """Return a view of the vertex set."""
        return self.adjacency.keys()

    @staticmethod
    def UpperCAmelCase_ ( snake_case__=None , snake_case__=None ):
        """Build a graph from optional vertex and edge lists."""
        lowercase__ : str= Graph()
        if vertices is None:
            lowercase__ : List[Any]= []
        if edges is None:
            lowercase__ : List[str]= []
        for vertex in vertices:
            g.add_vertex(_snake_case )
        for edge in edges:
            g.add_edge(*_snake_case )
        return g
class __UpperCAmelCase:
    """Disjoint-set (union-find) with union by rank and path compression,
    plus a static Borůvka minimum-spanning-tree routine.

    NOTE(review): bodies reference ``_snake_case`` (undefined), and the
    Borůvka method refers to ``Graph.UnionFind`` / ``Graph.build`` — upstream
    this class is nested inside the graph class above. Obfuscation damage;
    confirm against the original source.
    """

    def __init__( self ):
        """Start with an empty forest (no items tracked)."""
        lowercase__ : Union[str, Any]= {}
        lowercase__ : Tuple= {}

    def __len__( self ):
        """Number of items ever added to the structure."""
        return len(self.parent )

    def UpperCAmelCase_ ( self , snake_case__ ):
        """Create a singleton set for ``item`` (idempotent)."""
        if item in self.parent:
            return self.find(_snake_case )
        lowercase__ : int= item
        lowercase__ : Any= 0
        return item

    def UpperCAmelCase_ ( self , snake_case__ ):
        """Return the set representative of ``item``, compressing the path."""
        if item not in self.parent:
            return self.make_set(_snake_case )
        if item != self.parent[item]:
            lowercase__ : str= self.find(self.parent[item] )
        return self.parent[item]

    def UpperCAmelCase_ ( self , snake_case__ , snake_case__ ):
        """Merge the two sets, attaching the lower-rank root under the higher.

        Returns the new root, or the shared root if already merged.
        """
        lowercase__ : Any= self.find(_snake_case )
        lowercase__ : Tuple= self.find(_snake_case )
        if roota == roota:
            return roota
        if self.rank[roota] > self.rank[roota]:
            lowercase__ : int= roota
            return roota
        if self.rank[roota] < self.rank[roota]:
            lowercase__ : Any= roota
            return roota
        if self.rank[roota] == self.rank[roota]:
            self.rank[roota] += 1
            lowercase__ : Union[str, Any]= roota
            return roota
        return None

    @staticmethod
    def UpperCAmelCase_ ( snake_case__ ):
        """Borůvka's algorithm: repeatedly add each component's cheapest
        outgoing edge until one component remains; returns the MST graph."""
        lowercase__ : List[Any]= graph.num_vertices
        lowercase__ : Union[str, Any]= Graph.UnionFind()
        lowercase__ : Any= []
        while num_components > 1:
            lowercase__ : Dict= {}
            for vertex in graph.get_vertices():
                # -1 marks "no candidate edge found yet" for this component.
                lowercase__ : Optional[Any]= -1
            lowercase__ : Union[str, Any]= graph.get_edges()
            for edge in edges:
                lowercase__, lowercase__, lowercase__ : str= edge
                edges.remove((tail, head, weight) )
            for edge in edges:
                lowercase__, lowercase__, lowercase__ : List[Any]= edge
                lowercase__ : str= union_find.find(_snake_case )
                lowercase__ : Union[str, Any]= union_find.find(_snake_case )
                if seta != seta:
                    if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
                        lowercase__ : Union[str, Any]= [head, tail, weight]
                    if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
                        lowercase__ : int= [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    lowercase__, lowercase__, lowercase__ : Any= cheap_edge[vertex]
                    if union_find.find(_snake_case ) != union_find.find(_snake_case ):
                        union_find.union(_snake_case , _snake_case )
                        mst_edges.append(cheap_edge[vertex] )
                        lowercase__ : Tuple= num_components - 1
        lowercase__ : Optional[int]= Graph.build(edges=_snake_case )
        return mst
| 218 |
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class __A( unittest.TestCase ):
    """Tests for ``TextGenerationPipeline`` across PT/TF backends: output
    structure, sampling flags, batching, long-input handling, device/dtype
    placement, and max_length/max_new_tokens warning behaviour.

    NOTE(review): obfuscation collapsed locals to ``__a`` and references
    ``_snake_case`` (undefined) throughout; later lines refer to
    ``text_generator`` / ``outputs`` / ``pipe`` which are never bound under
    those names here. Confirm against the upstream transformers test file.
    """

    # Model mappings that define which architectures this pipeline supports.
    snake_case_ = MODEL_FOR_CAUSAL_LM_MAPPING
    snake_case_ = TF_MODEL_FOR_CAUSAL_LM_MAPPING

    @require_torch
    def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]:
        """PT tiny model: deterministic greedy output, batching, and
        return_tensors / custom pad-token handling."""
        __a = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' )
        # Using `do_sample=False` to force deterministic output
        __a = text_generator('''This is a test''' , do_sample=_snake_case )
        self.assertEqual(
            _snake_case , [
                {
                    '''generated_text''': (
                        '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
                        ''' oscope. FiliFili@@'''
                    )
                }
            ] , )
        __a = text_generator(['''This is a test''', '''This is a second test'''] )
        self.assertEqual(
            _snake_case , [
                [
                    {
                        '''generated_text''': (
                            '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
                            ''' oscope. FiliFili@@'''
                        )
                    }
                ],
                [
                    {
                        '''generated_text''': (
                            '''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'''
                            ''' oscope. oscope. FiliFili@@'''
                        )
                    }
                ],
            ] , )
        __a = text_generator('''This is a test''' , do_sample=_snake_case , num_return_sequences=2 , return_tensors=_snake_case )
        self.assertEqual(
            _snake_case , [
                {'''generated_token_ids''': ANY(_snake_case )},
                {'''generated_token_ids''': ANY(_snake_case )},
            ] , )
        # Override the pad token with EOS, then batch with two return sequences.
        __a = text_generator.model.config.eos_token_id
        __a = '''<pad>'''
        __a = text_generator(
            ['''This is a test''', '''This is a second test'''] , do_sample=_snake_case , num_return_sequences=2 , batch_size=2 , return_tensors=_snake_case , )
        self.assertEqual(
            _snake_case , [
                [
                    {'''generated_token_ids''': ANY(_snake_case )},
                    {'''generated_token_ids''': ANY(_snake_case )},
                ],
                [
                    {'''generated_token_ids''': ANY(_snake_case )},
                    {'''generated_token_ids''': ANY(_snake_case )},
                ],
            ] , )

    @require_tf
    def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
        """TF tiny model: deterministic greedy output, single and batched."""
        __a = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' )
        # Using `do_sample=False` to force deterministic output
        __a = text_generator('''This is a test''' , do_sample=_snake_case )
        self.assertEqual(
            _snake_case , [
                {
                    '''generated_text''': (
                        '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
                        ''' please,'''
                    )
                }
            ] , )
        __a = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=_snake_case )
        self.assertEqual(
            _snake_case , [
                [
                    {
                        '''generated_text''': (
                            '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
                            ''' please,'''
                        )
                    }
                ],
                [
                    {
                        '''generated_text''': (
                            '''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'''
                            ''' Cannes 閲閲Cannes Cannes Cannes 攵 please,'''
                        )
                    }
                ],
            ] , )

    def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case ) -> Optional[Any]:
        """Build a pipeline plus example prompts for the common test-suite."""
        __a = TextGenerationPipeline(model=_snake_case , tokenizer=_snake_case )
        return text_generator, ["This is a test", "Another test"]

    def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple:
        """Check that `stop_sequence` truncates generation at the stop string."""
        __a = '''Hello I believe in'''
        __a = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
        __a = text_generator(_snake_case )
        self.assertEqual(
            _snake_case , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , )
        __a = text_generator(_snake_case , stop_sequence=''' fe''' )
        self.assertEqual(_snake_case , [{'''generated_text''': '''Hello I believe in fe'''}] )

    def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> int:
        """Common pipeline checks: return_full_text/return_text/return_tensors
        combinations, batching, empty prompts, and long-input strategies."""
        __a = text_generator.model
        __a = text_generator.tokenizer
        __a = text_generator('''This is a test''' )
        self.assertEqual(_snake_case , [{'''generated_text''': ANY(_snake_case )}] )
        self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
        __a = text_generator('''This is a test''' , return_full_text=_snake_case )
        self.assertEqual(_snake_case , [{'''generated_text''': ANY(_snake_case )}] )
        self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
        __a = pipeline(task='''text-generation''' , model=_snake_case , tokenizer=_snake_case , return_full_text=_snake_case )
        __a = text_generator('''This is a test''' )
        self.assertEqual(_snake_case , [{'''generated_text''': ANY(_snake_case )}] )
        self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
        __a = text_generator('''This is a test''' , return_full_text=_snake_case )
        self.assertEqual(_snake_case , [{'''generated_text''': ANY(_snake_case )}] )
        self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
        __a = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=_snake_case )
        self.assertEqual(
            _snake_case , [
                [{'''generated_text''': ANY(_snake_case )}, {'''generated_text''': ANY(_snake_case )}],
                [{'''generated_text''': ANY(_snake_case )}, {'''generated_text''': ANY(_snake_case )}],
            ] , )
        if text_generator.tokenizer.pad_token is not None:
            __a = text_generator(
                ['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=_snake_case )
            self.assertEqual(
                _snake_case , [
                    [{'''generated_text''': ANY(_snake_case )}, {'''generated_text''': ANY(_snake_case )}],
                    [{'''generated_text''': ANY(_snake_case )}, {'''generated_text''': ANY(_snake_case )}],
                ] , )
        # Mutually exclusive output-format flags must raise.
        with self.assertRaises(_snake_case ):
            __a = text_generator('''test''' , return_full_text=_snake_case , return_text=_snake_case )
        with self.assertRaises(_snake_case ):
            __a = text_generator('''test''' , return_full_text=_snake_case , return_tensors=_snake_case )
        with self.assertRaises(_snake_case ):
            __a = text_generator('''test''' , return_text=_snake_case , return_tensors=_snake_case )
        # Empty prompt is slighly special
        # it requires BOS token to exist.
        # Special case for Pegasus which will always append EOS so will
        # work even without BOS.
        if (
            text_generator.tokenizer.bos_token_id is not None
            or "Pegasus" in tokenizer.__class__.__name__
            or "Git" in model.__class__.__name__
        ):
            __a = text_generator('''''' )
            self.assertEqual(_snake_case , [{'''generated_text''': ANY(_snake_case )}] )
        else:
            with self.assertRaises((ValueError, AssertionError) ):
                __a = text_generator('''''' )
        if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, dismissing tests for now.
            return
        # We don't care about infinite range models.
        # They already work.
        # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        __a = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM''']
        if (
            tokenizer.model_max_length < 10_000
            and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
        ):
            # Handling of large generations
            with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
                text_generator('''This is a test''' * 500 , max_new_tokens=20 )
            __a = text_generator('''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=20 )
            # Hole strategy cannot work
            with self.assertRaises(_snake_case ):
                text_generator(
                    '''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , )

    @require_torch
    @require_accelerate
    @require_torch_gpu
    def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
        """accelerate integration: device_map / torch_dtype placement checks."""
        import torch

        # Classic `model_kwargs`
        __a = pipeline(
            model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
        __a = pipe('''This is a test''' )
        self.assertEqual(
            _snake_case , [
                {
                    '''generated_text''': (
                        '''This is a test test test test test test test test test test test test test test test test'''
                        ''' test'''
                    )
                }
            ] , )
        # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        __a = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
        __a = pipe('''This is a test''' )
        self.assertEqual(
            _snake_case , [
                {
                    '''generated_text''': (
                        '''This is a test test test test test test test test test test test test test test test test'''
                        ''' test'''
                    )
                }
            ] , )
        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        __a = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
        __a = pipe('''This is a test''' )
        self.assertEqual(
            _snake_case , [
                {
                    '''generated_text''': (
                        '''This is a test test test test test test test test test test test test test test test test'''
                        ''' test'''
                    )
                }
            ] , )

    @require_torch
    @require_torch_gpu
    def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple:
        """Smoke test: fp16 pipeline on an explicit CUDA device runs."""
        import torch

        __a = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa )
        pipe('''This is a test''' )

    @require_torch
    @require_accelerate
    @require_torch_gpu
    def SCREAMING_SNAKE_CASE_ ( self ) -> int:
        """Smoke test: fp16 + device_map=auto with sampling runs."""
        import torch

        __a = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa )
        pipe('''This is a test''' , do_sample=_snake_case , top_p=0.5 )

    def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
        """Only warn when BOTH max_length and max_new_tokens are set."""
        __a = '''Hello world'''
        __a = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
        if text_generator.model.framework == "tf":
            __a = logging.get_logger('''transformers.generation.tf_utils''' )
        else:
            __a = logging.get_logger('''transformers.generation.utils''' )
        __a = '''Both `max_new_tokens`''' # The beggining of the message to be checked in this test
        # Both are set by the user -> log warning
        with CaptureLogger(_snake_case ) as cl:
            __a = text_generator(_snake_case , max_length=10 , max_new_tokens=1 )
        self.assertIn(_snake_case , cl.out )
        # The user only sets one -> no warning
        with CaptureLogger(_snake_case ) as cl:
            __a = text_generator(_snake_case , max_new_tokens=1 )
        self.assertNotIn(_snake_case , cl.out )
        with CaptureLogger(_snake_case ) as cl:
            __a = text_generator(_snake_case , max_length=10 )
        self.assertNotIn(_snake_case , cl.out )
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__magic_name__ : Union[str, Any] = logging.get_logger(__name__)
class lowerCamelCase ( __snake_case ):
    """Deprecated alias for ``BeitImageProcessor``.

    Kept only for backward compatibility; constructing it emits a
    ``FutureWarning`` and then delegates entirely to the parent class.
    """

    def __init__( self , *args , **kwargs ):
        # Fixed from obfuscated original: *args/**kwargs shared one name
        # (SyntaxError) and the warning category positional had been replaced
        # by that name — upstream passes FutureWarning here.
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 702 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCamelCase ( unittest.TestCase ):
    """Configuration holder for the LayoutLMv3 image-processing tests.

    Stores the knobs (image size, resolution bounds, OCR flag, ...) that the
    test class below reads when building inputs and processors.
    """

    # Fixed from obfuscated original: every keyword parameter shared the name
    # ``__UpperCamelCase`` (a SyntaxError) and the ``self.*`` assignment
    # targets were lost; names restored from the attribute reads in
    # ``prepare_image_processor_dict`` and the sibling test class.
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , apply_ocr=True , ):
        # Default target size when the caller does not provide one.
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def lowercase_ ( self ):
        """Return the kwargs dict used to instantiate the image processor."""
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class lowerCamelCase ( __snake_case , unittest.TestCase ):
    """Test suite for the LayoutLMv3 image processor (resizing + Tesseract OCR).

    NOTE(review): identifiers in this class look machine-mangled — every method
    is named ``lowercase_`` (later definitions shadow earlier ones, so unittest
    would only ever see the last), and results are bound to ``A_`` while later
    lines read names that are never defined in scope (``image_processor``,
    ``image_processing``, ``image_inputs``, ``encoding``, ``encoded_images``,
    ``ds``, ``image``, ``__UpperCamelCase``). Restore the upstream identifiers
    before running.
    """

    # Class under test; None (suite skipped) when pytesseract is unavailable.
    lowerCAmelCase_ = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def lowercase_ ( self ):
        # setUp: helper object that supplies config dicts and dummy image batches.
        A_ = LayoutLMvaImageProcessingTester(self )

    @property
    def lowercase_ ( self ):
        # image_processor_dict property: default kwargs for the processor.
        return self.image_processor_tester.prepare_image_processor_dict()

    def lowercase_ ( self ):
        # The processor must expose its configurable properties.
        A_ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__UpperCamelCase , "do_resize" ) )
        self.assertTrue(hasattr(__UpperCamelCase , "size" ) )
        self.assertTrue(hasattr(__UpperCamelCase , "apply_ocr" ) )

    def lowercase_ ( self ):
        # from_dict must honour both the stored size and an explicit override.
        A_ = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
        A_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {"height": 42, "width": 42} )

    def lowercase_ ( self ):
        # Intentionally empty placeholder (upstream: test_call_pil skipped variant).
        pass

    def lowercase_ ( self ):
        # PIL input: single image and batch are resized to (height, width), and
        # OCR words/boxes are returned alongside the pixel values.
        # Initialize image_processing
        A_ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase )
        for image in image_inputs:
            self.assertIsInstance(__UpperCamelCase , Image.Image )
        # Test not batched input
        A_ = image_processing(image_inputs[0] , return_tensors="pt" )
        self.assertEqual(
            encoding.pixel_values.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        self.assertIsInstance(encoding.words , __UpperCamelCase )
        self.assertIsInstance(encoding.boxes , __UpperCamelCase )
        # Test batched
        A_ = image_processing(__UpperCamelCase , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )

    def lowercase_ ( self ):
        # numpy input: same shape checks as the PIL case.
        # Initialize image_processing
        A_ = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , numpify=__UpperCamelCase )
        for image in image_inputs:
            self.assertIsInstance(__UpperCamelCase , np.ndarray )
        # Test not batched input
        A_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        A_ = image_processing(__UpperCamelCase , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )

    def lowercase_ ( self ):
        # torch.Tensor input: same shape checks as the PIL case.
        # Initialize image_processing
        A_ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase )
        for image in image_inputs:
            self.assertIsInstance(__UpperCamelCase , torch.Tensor )
        # Test not batched input
        A_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        A_ = image_processing(__UpperCamelCase , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )

    def lowercase_ ( self ):
        # Integration test against a DocVQA fixture: OCR on (apply_ocr=True)
        # must reproduce reference words/boxes; OCR off returns pixels only.
        # with apply_OCR = True
        A_ = LayoutLMvaImageProcessor()
        from datasets import load_dataset
        A_ = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" )
        A_ = Image.open(ds[0]["file"] ).convert("RGB" )
        A_ = image_processing(__UpperCamelCase , return_tensors="pt" )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
        self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        A_ = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
        A_ = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], 
        [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
        # fmt: on
        self.assertListEqual(encoding.words , __UpperCamelCase )
        self.assertListEqual(encoding.boxes , __UpperCamelCase )
        # with apply_OCR = False
        A_ = LayoutLMvaImageProcessor(apply_ocr=__UpperCamelCase )
        A_ = image_processing(__UpperCamelCase , return_tensors="pt" )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 608 | 0 |
from itertools import count
def _lowerCAmelCase ( A__: int = 50 ):
'''simple docstring'''
UpperCAmelCase = [1] * min_block_length
for n in count(A__ ):
fill_count_functions.append(1 )
for block_length in range(A__ , n + 1 ):
for block_start in range(n - block_length ):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
if fill_count_functions[n] > 100_0000:
break
return n
if __name__ == "__main__":
    # BUG FIX: the original printed ``solution()``, a name never defined in
    # this module; the solver defined above is ``_lowerCAmelCase``.
    print(f'''{_lowerCAmelCase() = }''')
| 254 |
# Lookup table mapping each decimal digit (as a one-character string) to its
# fifth power; precomputed once for the digit-fifth-power check (Euler 30).
__magic_name__ = {str(digit): digit**5 for digit in range(10)}
def _lowerCAmelCase ( A__: int ):
'''simple docstring'''
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(A__ ) )
def _lowerCAmelCase ( ):
'''simple docstring'''
return sum(
number
for number in range(1000 , 100_0000 )
if number == digits_fifth_powers_sum(A__ ) )
if __name__ == "__main__":
    # BUG FIX: the original printed ``solution()``, a name never defined in
    # this module; the solver defined above is ``_lowerCAmelCase``.
    print(_lowerCAmelCase())
| 254 | 1 |
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class _lowerCAmelCase ( a ):
    """Scheduler test suite for ``UnCLIPScheduler``: config variations, both
    variance types, and full denoising loops with and without ``set_timesteps``.

    NOTE(review): identifiers appear machine-mangled — every method is named
    ``snake_case`` (later definitions shadow earlier ones, so unittest would
    only discover the last), and values are bound to ``lowerCAmelCase__`` while
    subsequent lines read names that are never defined (``config``,
    ``scheduler_class``, ``scheduler``, ``model``, ``sample``, ``timesteps``,
    ``residual``, ``pred_prev_sample``, ``result_sum``, ``result_mean``,
    ``prev_timestep``). Restore the upstream names before running.
    """

    # Scheduler class(es) exercised by SchedulerCommonTest machinery.
    __magic_name__ :int = (UnCLIPScheduler,)

    def snake_case ( self , **__UpperCAmelCase ):
        """Default scheduler config; keyword overrides are merged on top."""
        lowerCAmelCase__ :Dict = {
            'num_train_timesteps': 1_0_0_0,
            'variance_type': 'fixed_small_log',
            'clip_sample': True,
            'clip_sample_range': 1.0,
            'prediction_type': 'epsilon',
        }

        config.update(**__UpperCAmelCase )  # NOTE(review): ``config`` is never bound
        return config

    def snake_case ( self ):
        """Config sweep over num_train_timesteps."""
        for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=__UpperCAmelCase )

    def snake_case ( self ):
        """Config sweep over both supported variance types."""
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=__UpperCAmelCase )

    def snake_case ( self ):
        """Config sweep over sample clipping on/off."""
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=__UpperCAmelCase )

    def snake_case ( self ):
        """Config sweep over clip_sample_range."""
        for clip_sample_range in [1, 5, 1_0, 2_0]:
            self.check_over_configs(clip_sample_range=__UpperCAmelCase )

    def snake_case ( self ):
        """Config sweep over prediction type."""
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=__UpperCAmelCase )

    def snake_case ( self ):
        """Forward sweep over (time_step, prev_timestep) pairs; a previous
        timestep at or after the current one is invalid and skipped."""
        for time_step in [0, 5_0_0, 9_9_9]:
            for prev_timestep in [None, 5, 1_0_0, 2_5_0, 5_0_0, 7_5_0]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue

                self.check_over_forward(time_step=__UpperCAmelCase , prev_timestep=__UpperCAmelCase )

    def snake_case ( self ):
        """fixed_small_log variance values at three reference timesteps."""
        lowerCAmelCase__ :Optional[Any] = self.scheduler_classes[0]
        lowerCAmelCase__ :Tuple = self.get_scheduler_config(variance_type='fixed_small_log' )
        lowerCAmelCase__ :int = scheduler_class(**__UpperCAmelCase )

        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.0_54_96_25 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.9_99_49_87 ) ) < 1E-5

    def snake_case ( self ):
        """learned_range variance (log-space) values at reference timesteps."""
        lowerCAmelCase__ :Dict = self.scheduler_classes[0]
        lowerCAmelCase__ :List[Any] = self.get_scheduler_config(variance_type='learned_range' )
        lowerCAmelCase__ :Any = scheduler_class(**__UpperCAmelCase )

        lowerCAmelCase__ :Optional[int] = 0.5

        assert scheduler._get_variance(1 , predicted_variance=__UpperCAmelCase ) - -10.1_71_27_90 < 1E-5
        assert scheduler._get_variance(4_8_7 , predicted_variance=__UpperCAmelCase ) - -5.7_99_80_52 < 1E-5
        assert scheduler._get_variance(9_9_9 , predicted_variance=__UpperCAmelCase ) - -0.0_01_00_11 < 1E-5

    def snake_case ( self ):
        """Full denoising loop over the training timesteps; the resulting
        sample statistics must match reference values."""
        lowerCAmelCase__ :List[Any] = self.scheduler_classes[0]
        lowerCAmelCase__ :Any = self.get_scheduler_config()
        lowerCAmelCase__ :Any = scheduler_class(**__UpperCAmelCase )

        lowerCAmelCase__ :str = scheduler.timesteps

        lowerCAmelCase__ :Dict = self.dummy_model()
        lowerCAmelCase__ :Optional[Any] = self.dummy_sample_deter
        lowerCAmelCase__ :Optional[Any] = torch.manual_seed(0 )

        for i, t in enumerate(__UpperCAmelCase ):
            # 1. predict noise residual
            lowerCAmelCase__ :Any = model(__UpperCAmelCase , __UpperCAmelCase )

            # 2. predict previous mean of sample x_t-1
            lowerCAmelCase__ :Any = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase ).prev_sample

            lowerCAmelCase__ :Dict = pred_prev_sample

        lowerCAmelCase__ :Tuple = torch.sum(torch.abs(__UpperCAmelCase ) )
        lowerCAmelCase__ :List[str] = torch.mean(torch.abs(__UpperCAmelCase ) )

        assert abs(result_sum.item() - 2_52.2_68_24_95 ) < 1E-2
        assert abs(result_mean.item() - 0.3_28_47_43 ) < 1E-3

    def snake_case ( self ):
        """Denoising loop with 25 inference steps, passing each explicit
        prev_timestep (None for the final step)."""
        lowerCAmelCase__ :Optional[int] = self.scheduler_classes[0]
        lowerCAmelCase__ :Tuple = self.get_scheduler_config()
        lowerCAmelCase__ :str = scheduler_class(**__UpperCAmelCase )

        scheduler.set_timesteps(2_5 )

        lowerCAmelCase__ :List[Any] = scheduler.timesteps

        lowerCAmelCase__ :Union[str, Any] = self.dummy_model()
        lowerCAmelCase__ :List[Any] = self.dummy_sample_deter
        lowerCAmelCase__ :int = torch.manual_seed(0 )

        for i, t in enumerate(__UpperCAmelCase ):
            # 1. predict noise residual
            lowerCAmelCase__ :Optional[int] = model(__UpperCAmelCase , __UpperCAmelCase )

            if i + 1 == timesteps.shape[0]:
                lowerCAmelCase__ :Optional[Any] = None
            else:
                lowerCAmelCase__ :int = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            lowerCAmelCase__ :Any = scheduler.step(
                __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , prev_timestep=__UpperCAmelCase , generator=__UpperCAmelCase ).prev_sample

            lowerCAmelCase__ :Dict = pred_prev_sample

        lowerCAmelCase__ :int = torch.sum(torch.abs(__UpperCAmelCase ) )
        lowerCAmelCase__ :Optional[Any] = torch.mean(torch.abs(__UpperCAmelCase ) )

        assert abs(result_sum.item() - 2_58.2_04_49_83 ) < 1E-2
        assert abs(result_mean.item() - 0.3_36_20_38 ) < 1E-3

    def snake_case ( self ):
        """Intentionally empty (upstream: train-timesteps variant not applicable)."""
        pass

    def snake_case ( self ):
        """Intentionally empty (upstream: thresholding variant not applicable)."""
        pass
| 560 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
# Force deterministic torch/cuDNN kernels so pixel-value assertions below are
# reproducible across runs.
enable_full_determinism()
class _lowerCAmelCase ( a , unittest.TestCase ):
    """Fast pipeline tests for ``KandinskyVaaImgaImgPipeline`` (Kandinsky 2.2
    image-to-image) using tiny randomly-initialised UNet and VQ models.

    NOTE(review): identifiers appear machine-mangled — every method is named
    ``snake_case`` (later definitions shadow earlier ones), every class
    attribute is named ``__magic_name__`` (only the last assignment survives),
    and values are bound to ``lowerCAmelCase__`` while later lines read names
    that are never defined (``model``, ``unet``, ``movq``, ``scheduler``,
    ``image``, ``generator``, ``pipe``, ``output``, ``image_slice``,
    ``image_from_tuple_slice``, ``expected_slice``). Restore the upstream
    names before running.
    """

    # NOTE(review): upstream these are pipeline_class / params / batch_params /
    # callback-related param lists and a test-xformers flag; the attribute
    # names were lost in mangling.
    __magic_name__ :Optional[int] = KandinskyVaaImgaImgPipeline
    __magic_name__ :int = ["""image_embeds""", """negative_image_embeds""", """image"""]
    __magic_name__ :Union[str, Any] = [
        """image_embeds""",
        """negative_image_embeds""",
        """image""",
    ]
    __magic_name__ :int = [
        """generator""",
        """height""",
        """width""",
        """strength""",
        """guidance_scale""",
        """num_inference_steps""",
        """return_dict""",
        """guidance_scale""",
        """num_images_per_prompt""",
        """output_type""",
        """return_dict""",
    ]
    __magic_name__ :Optional[int] = False

    @property
    def snake_case ( self ):
        """Text-embedder hidden size used by the tiny test models."""
        return 3_2

    @property
    def snake_case ( self ):
        """Time-embedding input dimension."""
        return 3_2

    @property
    def snake_case ( self ):
        """Block output channel base (same as time_input_dim)."""
        return self.time_input_dim

    @property
    def snake_case ( self ):
        """Time-embedding output dimension."""
        return self.time_input_dim * 4

    @property
    def snake_case ( self ):
        """Cross-attention dimension."""
        return 1_0_0

    @property
    def snake_case ( self ):
        """Tiny randomly-seeded UNet conditioned on image embeddings."""
        torch.manual_seed(0 )

        lowerCAmelCase__ :Optional[int] = {
            'in_channels': 4,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 8,
            'addition_embed_type': 'image',
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
            'layers_per_block': 1,
            'encoder_hid_dim': self.text_embedder_hidden_size,
            'encoder_hid_dim_type': 'image_proj',
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': None,
        }

        lowerCAmelCase__ :Union[str, Any] = UNetaDConditionModel(**__UpperCAmelCase )
        return model

    @property
    def snake_case ( self ):
        """Constructor kwargs for the tiny VQ (movq) model."""
        return {
            "block_out_channels": [3_2, 6_4],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 1_2,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def snake_case ( self ):
        """Tiny randomly-seeded VQ model."""
        torch.manual_seed(0 )
        lowerCAmelCase__ :Dict = VQModel(**self.dummy_movq_kwargs )
        return model

    def snake_case ( self ):
        """Assemble the pipeline components (unet, DDIM scheduler, movq)."""
        lowerCAmelCase__ :Dict = self.dummy_unet
        lowerCAmelCase__ :str = self.dummy_movq

        lowerCAmelCase__ :Union[str, Any] = {
            'num_train_timesteps': 1_0_0_0,
            'beta_schedule': 'linear',
            'beta_start': 0.0_00_85,
            'beta_end': 0.0_12,
            'clip_sample': False,
            'set_alpha_to_one': False,
            'steps_offset': 0,
            'prediction_type': 'epsilon',
            'thresholding': False,
        }

        lowerCAmelCase__ :Any = DDIMScheduler(**__UpperCAmelCase )

        lowerCAmelCase__ :int = {
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }

        return components

    def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=0 ):
        """Deterministic dummy call kwargs (embeds, init image, generator)."""
        lowerCAmelCase__ :int = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
        lowerCAmelCase__ :List[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            __UpperCAmelCase )

        # create init_image
        lowerCAmelCase__ :Any = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
        lowerCAmelCase__ :Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowerCAmelCase__ :str = Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert('RGB' ).resize((2_5_6, 2_5_6) )

        if str(__UpperCAmelCase ).startswith('mps' ):
            lowerCAmelCase__ :Optional[Any] = torch.manual_seed(__UpperCAmelCase )
        else:
            lowerCAmelCase__ :Optional[Any] = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
        lowerCAmelCase__ :List[Any] = {
            'image': init_image,
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'generator': generator,
            'height': 6_4,
            'width': 6_4,
            'num_inference_steps': 1_0,
            'guidance_scale': 7.0,
            'strength': 0.2,
            'output_type': 'np',
        }
        return inputs

    def snake_case ( self ):
        """End-to-end CPU run: output image slice must match reference values
        and the tuple return must agree with the dataclass return."""
        lowerCAmelCase__ :Optional[Any] = 'cpu'

        lowerCAmelCase__ :Tuple = self.get_dummy_components()

        lowerCAmelCase__ :Optional[Any] = self.pipeline_class(**__UpperCAmelCase )
        lowerCAmelCase__ :Union[str, Any] = pipe.to(__UpperCAmelCase )

        pipe.set_progress_bar_config(disable=__UpperCAmelCase )

        lowerCAmelCase__ :List[Any] = pipe(**self.get_dummy_inputs(__UpperCAmelCase ) )
        lowerCAmelCase__ :List[Any] = output.images

        lowerCAmelCase__ :Dict = pipe(
            **self.get_dummy_inputs(__UpperCAmelCase ) , return_dict=__UpperCAmelCase , )[0]

        lowerCAmelCase__ :str = image[0, -3:, -3:, -1]
        lowerCAmelCase__ :List[str] = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 6_4, 6_4, 3)

        lowerCAmelCase__ :Optional[Any] = np.array(
            [0.6_19_97_78, 0.63_98_44_06, 0.46_14_57_85, 0.62_94_49_84, 0.5_62_22_15, 0.47_30_61_32, 0.47_44_14_56, 0.4_60_76_06, 0.48_71_92_63] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        ), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
        ), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
    """Slow GPU integration test: Kandinsky 2.2 prior + img2img decoder on a
    real cat image, compared against a stored reference array.

    NOTE(review): identifiers appear machine-mangled — both methods are named
    ``snake_case`` (the intended ``tearDown`` would never be called by
    unittest), and values are bound to ``lowerCAmelCase__`` while later lines
    read undefined names (``pipe_prior``, ``pipeline``, ``init_image``,
    ``prompt``, ``generator``, ``image_emb``, ``zero_image_emb``, ``output``,
    ``image``, ``expected_image``).
    """

    def snake_case ( self ):
        """tearDown: release GPU memory between tests."""
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def snake_case ( self ):
        """Run prior -> decoder at 768x768 and compare with the reference."""
        lowerCAmelCase__ :Any = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/kandinskyv22_img2img_frog.npy' )

        lowerCAmelCase__ :str = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
        lowerCAmelCase__ :Any = 'A red cartoon frog, 4k'

        lowerCAmelCase__ :Dict = KandinskyVaaPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
        pipe_prior.to(__UpperCAmelCase )

        lowerCAmelCase__ :Optional[int] = KandinskyVaaImgaImgPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-decoder' , torch_dtype=torch.floataa )
        lowerCAmelCase__ :List[Any] = pipeline.to(__UpperCAmelCase )

        pipeline.set_progress_bar_config(disable=__UpperCAmelCase )

        lowerCAmelCase__ :List[Any] = torch.Generator(device='cpu' ).manual_seed(0 )

        lowerCAmelCase__ , lowerCAmelCase__ :str = pipe_prior(
            __UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , negative_prompt='' , ).to_tuple()

        lowerCAmelCase__ :List[Any] = pipeline(
            image=__UpperCAmelCase , image_embeds=__UpperCAmelCase , negative_image_embeds=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , strength=0.2 , output_type='np' , )

        lowerCAmelCase__ :Dict = output.images[0]

        assert image.shape == (7_6_8, 7_6_8, 3)

        assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase )
| 560 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the ELECTRA model family: maps each submodule to
# the public names it provides. Nothing heavy is imported until an attribute
# is actually accessed (see the _LazyModule installation at the bottom).
#
# BUG FIXES vs. the original: every backend's symbol list was assigned to the
# same throwaway name (``__UpperCamelCase``), repeatedly overwriting it, and
# the final _LazyModule call referenced ``_import_structure`` — a name that
# was never defined — and discarded the lazy module instead of installing it
# into ``sys.modules``.
_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}

# Each optional backend contributes its symbols only when it is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_electra"] = [
        "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ElectraForCausalLM",
        "ElectraForMaskedLM",
        "ElectraForMultipleChoice",
        "ElectraForPreTraining",
        "ElectraForQuestionAnswering",
        "ElectraForSequenceClassification",
        "ElectraForTokenClassification",
        "ElectraModel",
        "ElectraPreTrainedModel",
        "load_tf_weights_in_electra",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_electra"] = [
        "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFElectraForMaskedLM",
        "TFElectraForMultipleChoice",
        "TFElectraForPreTraining",
        "TFElectraForQuestionAnswering",
        "TFElectraForSequenceClassification",
        "TFElectraForTokenClassification",
        "TFElectraModel",
        "TFElectraPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_electra"] = [
        "FlaxElectraForCausalLM",
        "FlaxElectraForMaskedLM",
        "FlaxElectraForMultipleChoice",
        "FlaxElectraForPreTraining",
        "FlaxElectraForQuestionAnswering",
        "FlaxElectraForSequenceClassification",
        "FlaxElectraForTokenClassification",
        "FlaxElectraModel",
        "FlaxElectraPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime these are resolved
    # lazily through _LazyModule below.
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 4 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class a ( a__ ):
    """Output wrapper for ``VQModel.encode``.

    NOTE(review): the single field was mangled to ``snake_case__ = 42``;
    upstream this is ``latents: torch.FloatTensor`` — confirm before use.
    """

    snake_case__ = 42
class a ( a__ , a__ ):
    """VQ-VAE model: Encoder -> 1x1 quant conv -> vector quantizer -> 1x1
    post-quant conv -> Decoder.

    NOTE(review): this class is machine-mangled and will not run as written —
    ``__init__`` declares the parameter ``_snake_case`` fourteen times (a
    SyntaxError), and every constructor result is bound to the local
    ``lowerCAmelCase`` (overwritten each time) instead of the ``self.encoder``
    / ``self.quant_conv`` / ``self.quantize`` / ``self.post_quant_conv`` /
    ``self.decoder`` attributes the methods below read. Restore the upstream
    ``VQModel`` before use.
    """

    @register_to_config
    def __init__( self , _snake_case = 3 , _snake_case = 3 , _snake_case = ("DownEncoderBlock2D",) , _snake_case = ("UpDecoderBlock2D",) , _snake_case = (64,) , _snake_case = 1 , _snake_case = "silu" , _snake_case = 3 , _snake_case = 32 , _snake_case = 2_56 , _snake_case = 32 , _snake_case = None , _snake_case = 0.18_215 , _snake_case = "group" , ):
        """Build encoder, quantizer and decoder from the config parameters."""
        super().__init__()

        # pass init params to Encoder
        lowerCAmelCase = Encoder(
            in_channels=_snake_case , out_channels=_snake_case , down_block_types=_snake_case , block_out_channels=_snake_case , layers_per_block=_snake_case , act_fn=_snake_case , norm_num_groups=_snake_case , double_z=_snake_case , )

        lowerCAmelCase = vq_embed_dim if vq_embed_dim is not None else latent_channels

        lowerCAmelCase = nn.Convad(_snake_case , _snake_case , 1 )
        lowerCAmelCase = VectorQuantizer(_snake_case , _snake_case , beta=0.25 , remap=_snake_case , sane_index_shape=_snake_case )
        lowerCAmelCase = nn.Convad(_snake_case , _snake_case , 1 )

        # pass init params to Decoder
        lowerCAmelCase = Decoder(
            in_channels=_snake_case , out_channels=_snake_case , up_block_types=_snake_case , block_out_channels=_snake_case , layers_per_block=_snake_case , act_fn=_snake_case , norm_num_groups=_snake_case , norm_type=_snake_case , )

    @apply_forward_hook
    def UpperCamelCase__ ( self , _snake_case , _snake_case = True ):
        """Encode pixels to (unquantized) latents; returns VQEncoderOutput or a tuple."""
        lowerCAmelCase = self.encoder(_snake_case )
        lowerCAmelCase = self.quant_conv(_snake_case )

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=_snake_case )

    @apply_forward_hook
    def UpperCamelCase__ ( self , _snake_case , _snake_case = False , _snake_case = True ):
        """Decode latents to pixels, optionally skipping vector quantization."""
        # also go through quantization layer
        if not force_not_quantize:
            lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = self.quantize(_snake_case )
        else:
            lowerCAmelCase = h
        lowerCAmelCase = self.post_quant_conv(_snake_case )
        # spatial norm decoders additionally condition on the quantized latents
        lowerCAmelCase = self.decoder(_snake_case , quant if self.config.norm_type == 'spatial' else None )

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=_snake_case )

    def UpperCamelCase__ ( self , _snake_case , _snake_case = True ):
        """Full forward pass: encode then decode a batch of images."""
        lowerCAmelCase = sample
        lowerCAmelCase = self.encode(_snake_case ).latents
        lowerCAmelCase = self.decode(_snake_case ).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=_snake_case )
| 4 | 1 |
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __magic_name__ ( _snake_case , _snake_case ):
    """UnCLIP text-projection module: fuses CLIP image embeddings and text
    prompt embeddings into time embeddings and extra encoder context tokens.

    NOTE(review): machine-mangled — ``__init__`` declares ``lowerCAmelCase__``
    four times (a SyntaxError) and binds every layer to the local
    ``UpperCAmelCase`` instead of the ``self.*`` attributes the forward method
    reads (``learned_classifier_free_guidance_embeddings``,
    ``embedding_proj``, ``clip_image_embeddings_project_to_time_embeddings``,
    ``clip_extra_context_tokens_proj``, ``encoder_hidden_states_proj``,
    ``text_encoder_hidden_states_norm``). Restore upstream names before use.
    """

    @register_to_config
    def __init__( self : Optional[Any] , *,
        lowerCAmelCase__ : int = 4 , lowerCAmelCase__ : int = 7_6_8 , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[int] , ) -> Optional[int]:
        """Create projections for time embeddings and extra context tokens."""
        super().__init__()

        # learned null-image embedding used for classifier-free guidance
        UpperCAmelCase = nn.Parameter(torch.zeros(lowerCAmelCase__ ) )

        # parameters for additional clip time embeddings
        UpperCAmelCase = nn.Linear(lowerCAmelCase__ , lowerCAmelCase__ )
        UpperCAmelCase = nn.Linear(lowerCAmelCase__ , lowerCAmelCase__ )

        # parameters for encoder hidden states
        UpperCAmelCase = clip_extra_context_tokens
        UpperCAmelCase = nn.Linear(
            lowerCAmelCase__ , self.clip_extra_context_tokens * cross_attention_dim )
        UpperCAmelCase = nn.Linear(lowerCAmelCase__ , lowerCAmelCase__ )
        UpperCAmelCase = nn.LayerNorm(lowerCAmelCase__ )

    def _UpperCamelCase ( self : str , *, lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[int] ) -> List[Any]:
        """Return (extra context tokens ++ projected text states, additive time embeddings)."""
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            UpperCAmelCase = image_embeddings.shape[0]
            UpperCAmelCase = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
            UpperCAmelCase = classifier_free_guidance_embeddings.expand(
                lowerCAmelCase__ , -1 )
            UpperCAmelCase = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        UpperCAmelCase = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        UpperCAmelCase = self.embedding_proj(lowerCAmelCase__ )
        UpperCAmelCase = self.clip_image_embeddings_project_to_time_embeddings(lowerCAmelCase__ )
        UpperCAmelCase = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        UpperCAmelCase = self.clip_extra_context_tokens_proj(lowerCAmelCase__ )
        UpperCAmelCase = clip_extra_context_tokens.reshape(lowerCAmelCase__ , -1 , self.clip_extra_context_tokens )
        UpperCAmelCase = clip_extra_context_tokens.permute(0 , 2 , 1 )

        UpperCAmelCase = self.encoder_hidden_states_proj(lowerCAmelCase__ )
        UpperCAmelCase = self.text_encoder_hidden_states_norm(lowerCAmelCase__ )
        UpperCAmelCase = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )

        return text_encoder_hidden_states, additive_clip_time_embeddings
| 707 |
def _lowerCAmelCase( __A , __A , __A ):
if n == 0:
return 1
elif n % 2 == 1:
return (binary_exponentiation(__A , n - 1 , __A ) * a) % mod
else:
UpperCAmelCase = binary_exponentiation(__A , n / 2 , __A )
return (b * b) % mod
# Demonstrate modular division: for prime p with p not dividing b, Fermat's
# little theorem gives b^(p-2) ≡ b^(-1) (mod p), so (a * b^(p-2)) % p equals
# (a / b) % p whenever b divides a exactly.
# BUG FIX: the original bound all three constants to the same mangled name
# (``lowerCAmelCase__``) and then read undefined names ``a``, ``b``, ``p``
# and ``binary_exponentiation``.
p = 701  # a prime number
a = 1000000000
b = 10

# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * _lowerCAmelCase(b, p - 2, p)) % p)

# using Python's built-in exponentiation (O(p) multiply chain here):
print((a / b) % p == (a * b ** (p - 2)) % p)
| 1 | 0 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    """Step *scheduler* ``num_steps`` times, recording the first LR before each step.

    Renamed from the mangled ``UpperCAmelCase__`` (whose duplicate parameter
    names were a SyntaxError) to the name the test class below actually calls.

    :param scheduler: any object exposing ``get_lr() -> list`` and ``step()``
    :param num_steps: number of steps to record
    :return: list of the first learning rate observed at each step
    """
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    """Like ``unwrap_schedule`` but round-trips the scheduler state to disk midway.

    At the halfway step the scheduler state dict is saved with ``torch.save``
    and immediately reloaded, exercising save/restore of the schedule.
    Renamed from the mangled ``UpperCAmelCase__`` (duplicate parameters were a
    SyntaxError) to the name the test class below calls.

    :param scheduler: object exposing ``get_lr``/``step``/``state_dict``/``load_state_dict``
    :param num_steps: number of steps to record
    :return: list of the first learning rate observed at each step
    """
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class _UpperCamelCase( unittest.TestCase ):
    """Smoke tests for the optimizers shipped with transformers (AdamW, Adafactor).

    The obfuscated original had lost every local variable name and the
    ``assertListAlmostEqual`` helper name that its own bodies reference;
    this restores them from the canonical upstream test.
    """

    def assertListAlmostEqual(self, list1, list2, tol):
        # Element-wise almost-equal comparison of two equally long lists.
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,  # the original's mangled ``betaa`` keyword would raise TypeError
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class _UpperCamelCase( unittest.TestCase ):
    """End-to-end checks for every learning-rate schedule factory.

    The mangled class attributes ``m``/``optimizer``/``num_steps`` (referenced
    by the method bodies as ``self.optimizer``/``self.num_steps``) and the
    per-scheduler locals are restored from the canonical upstream test.
    """

    # Built eagerly at class-creation time; guarded so import works without torch.
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        # Element-wise almost-equal comparison of two equally long lists.
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers doct format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }

        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")
class _UpperCamelCase:
    """Picklable proxy around a schedule lambda.

    LR schedules built from local lambdas cannot be pickled; wrapping each
    lambda in an instance of this class makes ``torch.save`` on the scheduler
    state possible.
    """

    def __init__(self, fn):
        # the wrapped callable
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        """Replace every ``lr_lambda`` on *scheduler* with a picklable wrapper.

        The obfuscated original bound the wrapped list to a throwaway local,
        so the call was a silent no-op; the in-place assignment is restored.
        """
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))


# Public name used by the schedule tests above.
LambdaScheduleWrapper = _UpperCamelCase
| 47 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Fast (CPU, tiny-weights) tests for ``StableDiffusionXLImgaImgPipeline``.

    The obfuscated original had destroyed every local/attribute name (e.g.
    the components dict referenced an undefined ``unet``) and the mixin base
    classes; names are restored following the canonical upstream test layout.
    """

    # Attribute names are the ones the pipeline tester mixins read.
    pipeline_class = StableDiffusionXLImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build a dict of tiny randomly initialised pipeline components."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            attention_head_dim=(2, 4),
            use_linear_projection=True,
            addition_embed_type="text_time",
            addition_time_embed_dim=8,
            transformer_layers_per_block=(1, 2),
            projection_class_embeddings_input_dim=80,
            cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            steps_offset=1,
            beta_schedule="scaled_linear",
            timestep_spacing="leading",
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        # NOTE(review): the mangled source passed an unrecoverable value to
        # ``local_files_only``; False is the from_pretrained default — confirm.
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=False)
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=False)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline inputs for the given *device* and *seed*."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            # MPS does not support per-device generators
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs

    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImgaImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    # NOTE(review): original method name lost to mangling; upstream skips this
    # check for the XL img2img pipeline.
    def test_save_load_optional_components(self):
        pass

    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImgaImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]
        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)
        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Slow GPU integration tests against real pretrained weights.

    Restored from the mangled original: local/method names (``tearDown`` and
    ``get_inputs`` are referenced by unittest and by the test body), the
    nonexistent ``torch.floataa`` default, and dataset residue fused onto the
    final assert line.
    """

    def tearDown(self):
        # free GPU memory between tests
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        """Deterministic inputs with pre-sampled latents for reproducibility."""
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_default(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
"""simple docstring"""
import doctest
from collections import deque
import numpy as np
class UpperCAmelCase_ :
    """Circular (cyclic) convolution of two discrete signals.

    The obfuscated original referenced an undefined ``UpperCamelCase_`` in
    place of every local variable; the locals are restored below.
    """

    def __init__( self ) -> None:
        # default example signals; mutated in place (zero-padded) by the solver
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def _lowerCamelCase ( self ) -> list[float]:
        """Return the circular convolution of the two signals via a circulant matrix.

        >>> UpperCAmelCase_()._lowerCamelCase()
        [10, 10, 6, 14]
        """
        length_first_signal = len(self.first_signal )
        length_second_signal = len(self.second_signal )

        max_length = max(length_first_signal , length_second_signal )

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for _ in range(max_length )]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        # row i of the circulant matrix is the second signal rotated right by i
        for i in range(max_length ):
            rotated_signal = deque(self.second_signal )
            rotated_signal.rotate(i )
            for j, item in enumerate(rotated_signal ):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix ) , np.transpose(self.first_signal ) )

        # rounding-off to two decimal places
        return [round(value , 2 ) for value in final_signal]
if __name__ == "__main__":
    # Run the doctests embedded in this module when it is executed directly.
    doctest.testmod()
| 704 |
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def __UpperCAmelCase ( module ):
    """Freeze *module*: disable gradient tracking on all of its parameters.

    The obfuscated original bound a throwaway local instead of clearing
    ``requires_grad``; the in-place flag update is restored.
    """
    for param in module.parameters():
        param.requires_grad = False


# Public alias for the name-mangled helper above.
freeze_params = __UpperCAmelCase
def __UpperCAmelCase ( ):
    """Pick the best available torch device: "cuda", "mps" or "cpu".

    The obfuscated original never bound the ``device`` name it later read;
    the local is restored. A warning is printed when MPS is selected because
    backpropagation on MPS has been observed to misbehave silently.
    """
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations." )
    return device


# Public alias for the name-mangled helper above.
get_device = __UpperCAmelCase
def __UpperCAmelCase ( image ):
    """Display *image* with matplotlib, hiding both axes.

    The obfuscated original read an undefined ``fig`` and passed the image
    itself to ``set_visible``; both are restored (``set_visible(False)``).
    """
    fig = plt.imshow(image )
    fig.axes.get_xaxis().set_visible(False )
    fig.axes.get_yaxis().set_visible(False )
    plt.show()
def __UpperCAmelCase ( ):
    """Return the current local time formatted as ``HH:MM:SS``.

    The obfuscated original never bound the locals it read; restored here.
    """
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S" )
    return timestamp


# Public alias for the name-mangled helper above.
get_timestamp = __UpperCAmelCase
| 523 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class a__ ( PretrainedConfig ):
    """Configuration for GPT-BigCode models.

    Reconstructed from the mangled original: the base class ``__snake_case``
    was undefined, all ``__init__`` parameters shared one name (SyntaxError),
    and the ``super().__init__`` kwargs were lost. Attribute names (including
    the mangled ``*_fpaa`` spelling of ``*_fp32``) are kept as in the source
    to preserve the interface.
    """

    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50_257,
        n_positions=1_024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        attention_softmax_in_fpaa=True,
        scale_attention_softmax_in_fpaa=True,
        multi_query=True,
        **kwargs,
    ):
        """Store the hyper-parameters and forward token ids to the base class."""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fpaa = attention_softmax_in_fpaa
        self.scale_attention_softmax_in_fpaa = scale_attention_softmax_in_fpaa
        self.multi_query = multi_query

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 357 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
"""google/pix2struct-textcaps-base""": (
"""https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"""
),
}
class __lowercase ( PretrainedConfig ):
    """Configuration for the Pix2Struct text (decoder) model.

    Reconstructed from the mangled original (undefined base class, duplicate
    ``__init__`` parameter names, lost ``super().__init__`` kwargs).
    """

    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50_244,
        hidden_size=768,
        d_kv=64,
        d_ff=2_048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        """Store text-model hyper-parameters and forward token ids to the base class."""
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Load this config, unwrapping the ``text_config`` of a composite Pix2Struct config."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


# Name used by the composite Pix2Struct config below.
PixaStructTextConfig = __lowercase
class __lowercase ( PretrainedConfig ):
    """Configuration for the Pix2Struct vision encoder.

    Reconstructed from the mangled original (undefined base class, duplicate
    ``__init__`` parameter names).
    """

    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2_048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4_096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        """Store vision-model hyper-parameters."""
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Load this config, unwrapping the ``vision_config`` of a composite Pix2Struct config."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


# Name used by the composite Pix2Struct config below.
PixaStructVisionConfig = __lowercase
class __lowercase ( PretrainedConfig ):
    """Composite Pix2Struct configuration holding a text and a vision sub-config.

    Reconstructed from the mangled original (undefined base class, duplicate
    ``__init__`` parameter names, lost attribute assignments).
    """

    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        """Build the sub-configs (defaulting when dicts are not given) and sync token ids."""
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = PixaStructTextConfig(**text_config)
        self.vision_config = PixaStructVisionConfig(**vision_config)

        # the composite config mirrors the text decoder's special token ids
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        # propagate the initializer range into both sub-configs
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        """Build a composite config from already-constructed sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize, expanding both sub-configs and recording ``model_type``."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 377 | 0 |
class SCREAMING_SNAKE_CASE__ :
    """Maximum sub-array sum over a comma-separated string of numbers.

    The obfuscated original read an undefined ``arr`` and dropped the
    ``self.array`` / indexed assignments; they are restored below.
    """

    def __init__( self , __A ):
        # we need a list not a string, so do something to change the type
        self.array = __A.split("," )

    def _lowercase ( self ):
        """Return the maximum contiguous sub-array sum (Kadane's algorithm)."""
        # sum_value[i]: best sum of a sub-array ending at i
        sum_value = [int(self.array[0] )] * len(self.array )
        # rear[i]: best sum seen anywhere in array[0..i]
        rear = [int(self.array[0] )] * len(self.array )
        for i in range(1 , len(self.array ) ):
            sum_value[i] = max(
                int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) )
            rear[i] = max(sum_value[i] , rear[i - 1] )
        return rear[len(self.array ) - 1]
if __name__ == "__main__":
    # Read a comma-separated list of numbers and print the max sub-array sum.
    # (The obfuscated original rebound one name repeatedly and referenced the
    # class under an undefined alias.)
    whole_array = input("please input some numbers:")
    array = SCREAMING_SNAKE_CASE__(whole_array)
    result = array._lowercase()
    print(("the results is:", result))
| 711 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the XLNet sub-package. The obfuscated original
# rebound one throwaway name instead of mutating ``_import_structure`` and
# dropped the ``sys.modules[__name__] = ...`` assignment, so lazy loading was
# broken; the canonical pattern is restored below.
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlnet"] = [
        "XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLNetForMultipleChoice",
        "XLNetForQuestionAnswering",
        "XLNetForQuestionAnsweringSimple",
        "XLNetForSequenceClassification",
        "XLNetForTokenClassification",
        "XLNetLMHeadModel",
        "XLNetModel",
        "XLNetPreTrainedModel",
        "load_tf_weights_in_xlnet",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlnet"] = [
        "TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLNetForMultipleChoice",
        "TFXLNetForQuestionAnsweringSimple",
        "TFXLNetForSequenceClassification",
        "TFXLNetForTokenClassification",
        "TFXLNetLMHeadModel",
        "TFXLNetMainLayer",
        "TFXLNetModel",
        "TFXLNetPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet import XLNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet_fast import XLNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlnet import (
            XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLNetForMultipleChoice,
            XLNetForQuestionAnswering,
            XLNetForQuestionAnsweringSimple,
            XLNetForSequenceClassification,
            XLNetForTokenClassification,
            XLNetLMHeadModel,
            XLNetModel,
            XLNetPreTrainedModel,
            load_tf_weights_in_xlnet,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlnet import (
            TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLNetForMultipleChoice,
            TFXLNetForQuestionAnsweringSimple,
            TFXLNetForSequenceClassification,
            TFXLNetForTokenClassification,
            TFXLNetLMHeadModel,
            TFXLNetMainLayer,
            TFXLNetModel,
            TFXLNetPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy loader.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 25 | 0 |
'''simple docstring'''
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int):
    """Return True if *color* can be assigned given the vertex's *neighbours* row.

    Renamed from the mangled ``A_`` (whose duplicate parameter names were a
    SyntaxError) to the name the backtracking helper below calls.

    :param neighbours: adjacency-matrix row (1 marks a neighbour)
    :param colored_vertices: current color per vertex (-1 = uncolored)
    :param color: candidate color
    """
    # Does any neighbour already carry the candidate color?
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )
def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int):
    """Backtracking step of graph coloring: try to color vertices from *index* on.

    Renamed from the mangled ``A_`` (duplicate parameter names were a
    SyntaxError) to the name its own recursive call and ``color`` use.
    Mutates *colored_vertices* in place; returns True on success.
    """
    # Base Case: every vertex colored
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def color(graph: list[list[int]], max_colors: int):
    """Color *graph* (adjacency matrix) with at most *max_colors* colors.

    Renamed from the mangled ``A_`` (duplicate parameter names were a
    SyntaxError); also strips dataset residue that was fused onto the final
    line. Returns the color assignment, or an empty list if impossible.
    """
    colored_vertices = [-1] * len(graph)

    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices

    return []
import os
from pathlib import Path
def _SCREAMING_SNAKE_CASE ( ):
    """Build and import the custom Deformable-DETR CUDA/CPU kernels.

    The obfuscated original passed an undefined ``__snake_case`` where
    ``__file__``, the source-file list and ``True`` belong; restored below.
    Compiles via torch's cpp_extension on first call and returns the module.
    """
    from torch.utils.cpp_extension import load

    # kernels/ lives three directories above this file
    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
| 107 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class __magic_name__ ( unittest.TestCase ):
    """Unit tests for the activation registry in ``transformers.activations``.

    The obfuscated original gave all four test methods the same name (so only
    the last survived) and destroyed every local; restored from the canonical
    upstream test.
    """

    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        # the python reference implementation matches torch's builtin gelu
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        # ... but differs from the "new" (tanh-approximation) variant
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        # gelu_10 clips at 10 and agrees with gelu below the clip point
        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        # every registered activation resolves; unknown keys raise
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        # the attribute set on act1 must not leak onto a fresh instance
        with self.assertRaises(AttributeError):
            _ = act2.a
| 457 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionControlNetPipelineIntegrationTests(unittest.TestCase):
    """Slow integration tests for the Flax Stable Diffusion ControlNet pipeline."""

    def tearDown(self):
        # Drop references between tests so the large Flax params can be collected.
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 457 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    """Holds sizing parameters and builds the kwargs dict for LevitImageProcessor tests.

    Renamed from the mangled class name: the test class below instantiates
    `LevitImageProcessingTester(self)`. The original __init__ had duplicate
    parameter names and discarded every value instead of storing it on `self`.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Return the kwargs used to construct a LevitImageProcessor under test."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class LevitImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for LevitImageProcessor: attribute presence, dict round-trips and
    PIL / numpy / torch input handling. The original class bound its state to
    throwaway locals and gave every method the same name (so only the last
    survived); names restored here.
    """

    # Read by ImageProcessingSavingTestMixin.
    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        # Scalar kwargs must be expanded into the dict form.
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 24 |
'''simple docstring'''
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
# Module-level logger: the class below logs through the name `logger`, which the
# previous (mangled) assignment never defined.
logger = logging.get_logger(__name__)
# Backward-compatible alias for the old module-level name.
SCREAMING_SNAKE_CASE = logger
def is_sagemaker_model_parallel_available() -> bool:
    """Return True when SageMaker model parallelism is configured and usable.

    Checks the SageMaker environment variables for model-parallel options and
    verifies the `smdistributed` package is installed. The function name matches
    the call site below (`if is_sagemaker_model_parallel_available(): ...`).
    """
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        # The previous code passed an undefined name as the default; absent key means "not enabled".
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
# If SageMaker model parallelism is configured, import and initialize the `smp`
# runtime once at module load so later code can call e.g. smp.local_rank()/smp.dp_size().
if is_sagemaker_model_parallel_available():
    import smdistributed.modelparallel.torch as smp
    smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    """Deprecated TrainingArguments subclass with SageMaker-specific device setup.

    Restores the mangled base class (TrainingArguments), field name and method
    names; all methods previously shared one name so only the last survived.
    """

    # Used by the SageMaker launcher; must keep this attribute name so the
    # argument parser exposes `--mp_parameters`.
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )

    @cached_property
    def _setup_devices(self) -> "torch.device":
        """Pick the torch device and set `self._n_gpu` for the current SageMaker mode."""
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device

    @property
    def world_size(self):
        # Under model parallelism, the effective data-parallel world size comes from smp.
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()
        return super().world_size

    @property
    def place_model_on_device(self):
        # smp manages model placement itself.
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
| 294 | 0 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
# Tiny checkpoints used as teachers in the tests below. Previously both were
# assigned to the same name, so the first constant was silently lost.
TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"
# Backward-compatible alias: the old shared name ended up bound to the T5 checkpoint.
lowerCamelCase__ = TINY_T5


@require_torch
class MakeStudentTester(unittest.TestCase):
    """Tests for create_student_by_copying_alternating_layers."""

    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        # d=None: keep all decoder layers from the teacher.
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        # Requesting neither encoder nor decoder layers is invalid.
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
| 82 | import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    """Static lint checks over `./datasets/*.py`: files must open with an explicit
    encoding and must not contain `print` calls. The original class had an
    undefined base and gave all four methods the same name."""

    def _no_encoding_on_file_open(self, filepath: str):
        r"""Return a regex match for any `open(...)` call missing a mode/encoding keyword, else None."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        r"""Return a match for a real `print(` call, ignoring prints inside comments/strings, else None."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
        relevant = [match for match in matches if match is not None and match.group(1) is not None]
        return relevant[0] if relevant else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
| 82 | 1 |
"""simple docstring"""
from typing import List
import numpy as np
def __UpperCamelCase ( snake_case__ ):
A_ : Optional[Any] = {key: len(snake_case__ ) for key, value in gen_kwargs.items() if isinstance(snake_case__ , snake_case__ )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
"""Sharding is ambiguous for this dataset: """
+ """we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"""
+ """\n""".join(F"""\t- key {key} has length {length}""" for key, length in lists_lengths.items() )
+ """\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, """
+ """and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."""
) )
A_ : List[str] = max(lists_lengths.values() , default=0 )
return max(1 , snake_case__ )
def __UpperCamelCase ( snake_case__ , snake_case__ ):
A_ : Union[str, Any] = []
for group_idx in range(snake_case__ ):
A_ : Tuple = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
A_ : int = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
A_ : Dict = range(snake_case__ , start + num_shards_to_add )
shards_indices_per_group.append(snake_case__ )
return shards_indices_per_group
def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    """Split sharded `gen_kwargs` into up to `max_num_jobs` per-job kwargs dicts.

    List values are partitioned shard-wise with `_distribute_shards`; non-list
    values are copied into every job's dict unchanged.
    """
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
    return [
        {
            key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
            if isinstance(value, list)
            else value
            for key, value in gen_kwargs.items()
        }
        for group_idx in range(len(shard_indices_per_group))
    ]
def __UpperCamelCase ( snake_case__ ):
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key] , snake_case__ )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def __UpperCamelCase ( snake_case__ , snake_case__ ):
A_ : Dict = {len(snake_case__ ) for value in gen_kwargs.values() if isinstance(snake_case__ , snake_case__ )}
A_ : Any = {}
for size in list_sizes:
A_ : str = list(range(snake_case__ ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
A_ : int = dict(snake_case__ )
for key, value in shuffled_kwargs.items():
if isinstance(snake_case__ , snake_case__ ):
A_ : int = [value[i] for i in indices_per_size[len(snake_case__ )]]
return shuffled_kwargs
| 180 |
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    """Tokenize a single text line, padding/truncating to `max_length` on `padding_side`.

    The original code had duplicate parameter names and assigned `padding_side`
    to a throwaway local instead of setting it on the tokenizer (the whole point
    of the argument). Name restored: the dataset class below calls `encode_line`.
    """
    # BART tokenizers need add_prefix_space when the line does not already start with a space.
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Drop columns that are entirely padding across the batch.

    Returns the trimmed `input_ids`, or a `(input_ids, attention_mask)` tuple when
    a mask is provided. (Name restored: `collate_fn` below calls `trim_batch`.)
    """
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    """Line-aligned seq2seq dataset reading `<type_path>.source` / `<type_path>.target`.

    Restores the mangled base class (torch `Dataset`), the `get_char_lens` static
    method referenced from `__init__`, and distinct method names (`get_char_lens`
    and `collate_fn` previously shared one name).
    """

    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, TaTokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right; RAG tokenizers wrap two tokenizers.
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        """Character length of every line in `data_file` (used to reject empty lines)."""
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch):
        """Stack a list of examples and trim padding columns shared by the whole batch."""
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
_lowerCAmelCase = getLogger(__name__)
def flatten_list(summary_ids):
    """Flatten a list of lists into a single list."""
    return list(itertools.chain.from_iterable(summary_ids))
def save_git_info(folder_path):
    """Save current git repo metadata to `folder_path/git_log.json`."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))
def save_json(content, path, indent=4, **json_dump_kwargs):
    """Serialize `content` as JSON to `path`. (Name restored: `save_git_info` calls it.)"""
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)
def load_json(path):
    """Load and return the JSON content of `path`."""
    with open(path) as f:
        return json.load(f)
def get_git_info():
    """Return id/sha/branch of the enclosing git repo plus the hostname.

    (Name restored: `save_git_info` calls it. The original passed an undefined
    name for `search_parent_directories` and for the repo id.)
    """
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos
def lmap(f, x):
    """`list(map(f, x))` shorthand."""
    return list(map(f, x))
def pickle_save(obj, path):
    """Pickle `obj` to `path`."""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lowercase and strip punctuation, articles and extra whitespace (SQuAD-style).

    (Name restored: `f1_score` / `exact_match_score` below call it.)
    """

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
    """Token-level F1 between normalized prediction and ground-truth strings."""
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
def exact_match_score(prediction, ground_truth):
    """True when prediction and ground truth are equal after normalization."""
    return normalize_answer(prediction) == normalize_answer(ground_truth)
def calculate_exact_match(output_lns, reference_lns):
    """Average exact-match score over paired output/reference lines, as {"em": float}."""
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model(model_prefix):
    """Whether `model_prefix` names a RAG model (e.g. 'rag_sequence', 'rag_token')."""
    return model_prefix.startswith("rag")
def set_extra_model_params(extra_params, hparams, config):
    """Move each truthy hyper-parameter in `extra_params` from `hparams` onto `config`.

    T5-style configs name `dropout` as `dropout_rate`; the equivalent-name table
    handles that. Parameters the config cannot hold are logged and dropped from
    `hparams`. Returns the mutated `(hparams, config)` pair.
    """
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 180 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    """Tool that generates an English caption for an image with BLIP.

    Restores the mangled base class (PipelineTool), the class-attribute names the
    PipelineTool machinery reads (all previously shared one name, so only the last
    survived), and the undefined `_A` references in the methods.
    """

    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVisionaSeq
    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        # Preprocess the PIL image into pixel-value tensors for the model.
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        # Single caption: decode the first sequence and strip surrounding whitespace.
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
| 711 |
'''simple docstring'''
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname: str) -> str:
    """Extract the class label from a file name like `dir/beagle_123.jpg` -> 'beagle'.

    (Name restored: the dataset below calls `extract_label`.)
    """
    stem = fname.split(os.path.sep)[-1]
    # Match the basename only: running the regex on the full path would greedily
    # swallow directory components into the label.
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]
class A_ ( Dataset ):
    """Map-style dataset over pet image files.

    Each item is a dict ``{"image": ..., "label": ...}`` where the image is
    loaded as RGB and optionally transformed, and the label is parsed from
    the filename and optionally mapped through ``label_to_id``.

    NOTE(review): base restored to the `Dataset` imported above (the previous
    base name was undefined), and parameter/attribute names restored — the
    original declared three parameters all named ``snake_case_`` (a
    SyntaxError) and bound them to throwaway locals instead of ``self``.
    Keyword names match the call sites in the training function
    (``image_transform=``, ``label_to_id=``).
    """

    def __init__( self , file_names , image_transform=None , label_to_id=None ):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__( self ):
        return len(self.file_names )

    def __getitem__( self , idx ):
        # BUG FIX: the image must be opened from the looked-up filename, not
        # from the integer index that was previously passed to PIL.
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname )
        image = raw_image.convert("RGB" )
        if self.image_transform is not None:
            image = self.image_transform(image )
        # NOTE(review): `extract_label` must exist at module scope — the
        # filename-parsing helper in this file is currently named
        # `UpperCAmelCase_`; confirm the intended callee.
        label = extract_label(fname )
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def UpperCAmelCase_ ( config , args ) -> None:
    """Fine-tune the classifier head of a frozen ResNet50d on pet images.

    Args:
        config: hyper-parameter dict with keys ``lr``, ``num_epochs``,
            ``seed``, ``batch_size`` and ``image_size``.
        args: parsed CLI namespace (``data_dir``, ``cpu``, ``mixed_precision``,
            ``checkpointing_steps``, ``output_dir``, ``resume_from_checkpoint``,
            ``with_tracking``, ``project_dir``).

    NOTE(review): the previous version declared two parameters with the same
    name (a SyntaxError) and bound every intermediate value to a throwaway
    local; variable names below are restored from their downstream uses.
    """
    # Initialize the accelerator, wiring up experiment trackers when requested.
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir )
    else:
        accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"] )
    seed = int(config["seed"] )
    batch_size = int(config["batch_size"] )
    image_size = config["image_size"]
    if not isinstance(image_size , (list, tuple) ):
        image_size = (image_size, image_size)
    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps , "isdigit" ):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps )
        else:
            raise ValueError(
                f'Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.' )
    else:
        checkpointing_steps = None
    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__ )[-1].split("." )[0]
        accelerator.init_trackers(run , config )
    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir , fname ) for fname in os.listdir(args.data_dir ) if fname.endswith(".jpg" )]
    # Build the label correspondences
    all_labels = [extract_label(fname ) for fname in file_names]
    id_to_label = list(set(all_labels ) )
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label )}
    # Set the seed before splitting the data.
    np.random.seed(seed )
    torch.manual_seed(seed )
    torch.cuda.manual_seed_all(seed )
    # Split our filenames between train and validation (80/20)
    random_perm = np.random.permutation(len(file_names ) )
    cut = int(0.8 * len(file_names ) )
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]
    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size , scale=(0.5, 1.0) ), ToTensor()] )
    # NOTE(review): `PetsDataset` / `extract_label` must exist at module
    # scope — in this file those definitions currently carry mangled names; confirm.
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split] , image_transform=train_tfm , label_to_id=label_to_id )
    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size ), ToTensor()] )
    eval_dataset = PetsDataset([file_names[i] for i in eval_split] , image_transform=eval_tfm , label_to_id=label_to_id )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset , shuffle=True , batch_size=batch_size , num_workers=4 )
    eval_dataloader = DataLoader(eval_dataset , shuffle=False , batch_size=batch_size , num_workers=4 )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = create_model("resnet50d" , pretrained=True , num_classes=len(label_to_id ) )
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Freezing the base model; only the classifier head is trained.
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True
    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"] )[None, :, None, None].to(accelerator.device )
    std = torch.tensor(model.default_cfg["std"] )[None, :, None, None].to(accelerator.device )
    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer , max_lr=lr , epochs=num_epochs , steps_per_epoch=len(train_dataloader ) )
    # Prepare everything; unpack in the same order the objects were passed.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    resume_step = None
    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f'Resumed from checkpoint: {args.resume_from_checkpoint}' )
            accelerator.load_state(args.resume_from_checkpoint )
            path = os.path.basename(args.resume_from_checkpoint )
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
            dirs.sort(key=os.path.getctime )
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path )[0]
        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_" , "" ) ) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_" , "" ) )
            starting_epoch = resume_step // len(train_dataloader )
            resume_step -= starting_epoch * len(train_dataloader )
    # Now we train the model
    for epoch in range(starting_epoch , num_epochs ):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader , resume_step )
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device ) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs )
            loss = torch.nn.functional.cross_entropy(outputs , batch["label"] )
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss )
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps , int ):
                output_dir = f'step_{overall_step}'
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir , output_dir )
                    accelerator.save_state(output_dir )
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device ) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs )
            predictions = outputs.argmax(dim=-1 )
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]) )
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()
        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'epoch {epoch}: {100 * eval_metric:.2f}' )
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader ),
                    "epoch": epoch,
                } , step=overall_step , )
        if checkpointing_steps == "epoch":
            output_dir = f'epoch_{epoch}'
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir , output_dir )
            accelerator.save_state(output_dir )
    if args.with_tracking:
        accelerator.end_training()
def UpperCAmelCase_ ( ) -> None:
    """Parse command-line arguments and launch the training function.

    NOTE(review): every ``required=``/``type=``/``default=`` below previously
    referenced an undefined name ``__lowercase``; values restored to the
    documented intent of each flag.
    """
    parser = argparse.ArgumentParser(description="Simple example of training script." )
    parser.add_argument("--data_dir" , required=True , help="The data folder on disk." )
    parser.add_argument("--fp16" , action="store_true" , help="If passed, will use FP16 training." )
    parser.add_argument(
        "--mixed_precision" , type=str , default=None , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU." , )
    parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
    parser.add_argument(
        "--checkpointing_steps" , type=str , default=None , help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch." , )
    parser.add_argument(
        "--output_dir" , type=str , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
    parser.add_argument(
        "--resume_from_checkpoint" , type=str , default=None , help="If the training should continue from a checkpoint folder." , )
    parser.add_argument(
        "--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." , )
    parser.add_argument(
        "--project_dir" , type=str , default="logs" , help="Location on where to store experiment tracking logs` and relevent project information" , )
    args = parser.parse_args()
    # Default hyper-parameters for the example run.
    config = {"lr": 3E-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    # NOTE(review): `training_function` must be defined at module scope — the
    # training entry point in this file currently carries a mangled name; confirm.
    training_function(config , args )
if __name__ == "__main__":
    # Script entry point.
    # NOTE(review): `main` is not defined in this file's visible scope — the
    # CLI entry point above is named `UpperCAmelCase_`; confirm the intended callee.
    main()
| 119 | 0 |
'''simple docstring'''
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
# Module-level logger for this trainer.
logger = logging.get_logger(__name__)

# Maps the `--lr_scheduler` CLI choice to the schedule factory that builds it.
arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    "constant": get_constant_schedule,
    "constant_w_warmup": get_constant_schedule_with_warmup,
}

# BUG FIX: both objects above were previously bound to the same throwaway
# name `_a`, which left `logger` and `arg_to_scheduler` (used by the trainer
# class below) undefined.  `_a` is kept as an alias of the final binding for
# backward compatibility.
_a = arg_to_scheduler
class __A (Trainer ):
    """Sequence-to-sequence Trainer with generation-aware evaluation.

    Supports label smoothing, pad-token-ignoring loss, Adafactor/AdamW
    optimizers, configurable LR schedules and generate()-based prediction.

    NOTE(review): the base is restored to `Trainer` (imported above) — the
    previous base name was undefined.  The original also declared duplicate
    parameter names (a SyntaxError), named every method `_snake_case`, and
    bound results to throwaway locals; names below are restored from the
    internal call sites visible in this file (`self._get_lr_scheduler`,
    `self._compute_loss`, `self._pad_tensors_to_max_len`) and the Trainer
    override contract.
    """

    def __init__( self , config=None , data_args=None , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        if config is None:
            # Without an explicit config we must be able to read it off the model.
            assert isinstance(self.model , PreTrainedModel ), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f""" {self.model.__class__}"""
            )
            self.config = self.model.config
        else:
            self.config = config
        self.data_args = data_args
        # FSMT keeps separate source/target vocabularies; use the target size.
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config , FSMTConfig ) else self.config.vocab_size
        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )
        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"""The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"""
                " padding.." )
        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss

    def create_optimizer_and_scheduler( self , num_training_steps ):
        """Build the optimizer (Adafactor or AdamW) and LR schedule unless already provided."""
        if self.optimizer is None:
            # Biases and LayerNorm weights get no weight decay.
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
                    "weight_decay": 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                # Shard optimizer state across data-parallel workers.
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters , optim=optimizer_cls , **optimizer_kwargs , )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters , **optimizer_kwargs )
        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps )
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored." )

    def _get_lr_scheduler( self , num_training_steps ):
        """Instantiate the schedule selected by `--lr_scheduler` with the right kwargs."""
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer )
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
        else:
            scheduler = schedule_func(
                self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=num_training_steps )
        return scheduler

    def _get_train_sampler( self ):
        """Pick a train sampler: none for iterable datasets, TPU/sortish/random/distributed otherwise."""
        if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset )
        else:
            if self.args.sortish_sampler:
                # Sortish sampling groups similar-length sequences to reduce padding.
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
            return (
                RandomSampler(self.train_dataset )
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset )
            )

    def _compute_loss( self , model , inputs , labels ):
        """Return (loss, logits), honoring pad-token ignoring and label smoothing."""
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs , use_cache=False )[0]
                loss = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
            else:
                # compute usual loss via models
                loss, logits = model(**inputs , labels=labels , use_cache=False )[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs , use_cache=False )[0]
            lprobs = torch.nn.functional.log_softmax(logits , dim=-1 )
            loss, _ = self.loss_fn(lprobs , labels , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
        return loss, logits

    def compute_loss( self , model , inputs ):
        """Trainer hook: pop labels from the batch and delegate to `_compute_loss`."""
        labels = inputs.pop("labels" )
        loss, _ = self._compute_loss(model , inputs , labels )
        return loss

    def prediction_step( self , model , inputs , prediction_loss_only , ignore_keys = None , ):
        """Evaluation step: optionally generate, compute loss, and pad outputs to max_length."""
        inputs = self._prepare_inputs(inputs )
        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }
        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"] , attention_mask=inputs["attention_mask"] , **gen_kwargs , )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens , gen_kwargs["max_length"] )
        labels = inputs.pop("labels" )
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model , inputs , labels )
        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)
        logits = generated_tokens if self.args.predict_with_generate else logits
        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels , gen_kwargs["max_length"] )
        return (loss, logits, labels)

    def _pad_tensors_to_max_len( self , tensor , max_length ):
        """Right-pad `tensor` with pad (or EOS) tokens up to `max_length` columns."""
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f""" padded to `max_length`={max_length}""" )
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
| 168 | '''simple docstring'''
from __future__ import annotations
def _lowercase ( lowerCamelCase__ ) -> float:
"""simple docstring"""
__UpperCAmelCase : Any = 0.00
__UpperCAmelCase : Union[str, Any] = 0
for resistor in resistors:
if resistor <= 0:
__UpperCAmelCase : Tuple = f"""Resistor at index {index} has a negative or zero value!"""
raise ValueError(lowerCamelCase__ )
first_sum += 1 / float(lowerCamelCase__ )
index += 1
return 1 / first_sum
def _lowercase ( lowerCamelCase__ ) -> float:
"""simple docstring"""
__UpperCAmelCase : int = 0.00
__UpperCAmelCase : List[str] = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
__UpperCAmelCase : Tuple = f"""Resistor at index {index} has a negative value!"""
raise ValueError(lowerCamelCase__ )
index += 1
return sum_r
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 168 | 1 |
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class lowercase__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Test suite for the RoCBert tokenizer (token, shape and pronunciation ids).

    NOTE(review): this class is heavily name-mangled — the base name
    `__SCREAMING_SNAKE_CASE` is undefined in this file's visible scope
    (presumably `TokenizerTesterMixin`, imported above), every test method
    is named `_UpperCAmelCase` (each later definition shadows the previous
    one), the class attributes are all named `A__` (only the last binding
    survives), and `_lowercase` is referenced throughout without ever being
    defined.  The code is reproduced unchanged; confirm the intended names
    before relying on this suite actually running.
    """

    # Mixin configuration: tokenizer class under test, rust counterpart,
    # space-handling flags and the English-filtering predicate.
    A__= RoCBertTokenizer
    A__= None
    A__= False
    A__= True
    A__= filter_non_english

    def _UpperCAmelCase ( self : str ):
        """Write a tiny vocab plus word-shape / word-pronunciation JSON maps to disk."""
        super().setUp()
        UpperCAmelCase__ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        UpperCAmelCase__ = {}
        UpperCAmelCase__ = {}
        # NOTE(review): `_lowercase` is undefined here — presumably the vocab list above.
        for i, value in enumerate(_lowercase ):
            UpperCAmelCase__ = i
            UpperCAmelCase__ = i
        UpperCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        UpperCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_shape_file"] )
        UpperCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_pronunciation_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
        with open(self.word_shape_file , "w" , encoding="utf-8" ) as word_shape_writer:
            json.dump(_lowercase , _lowercase , ensure_ascii=_lowercase )
        with open(self.word_pronunciation_file , "w" , encoding="utf-8" ) as word_pronunciation_writer:
            json.dump(_lowercase , _lowercase , ensure_ascii=_lowercase )

    def _UpperCAmelCase ( self : int ):
        """Full tokenizer round-trip: tokens, ids, shape ids and pronunciation ids agree."""
        UpperCAmelCase__ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
        UpperCAmelCase__ = tokenizer.tokenize("你好[SEP]你是谁" )
        self.assertListEqual(_lowercase , ["你", "好", "[SEP]", "你", "是", "谁"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , [5, 6, 2, 5, 7, 8] )
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(_lowercase ) , [5, 6, 2, 5, 7, 8] )
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(_lowercase ) , [5, 6, 2, 5, 7, 8] )

    def _UpperCAmelCase ( self : Union[str, Any] ):
        """Basic tokenizer splits CJK characters into individual tokens."""
        UpperCAmelCase__ = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )

    def _UpperCAmelCase ( self : Optional[int] ):
        """Lower-casing basic tokenizer normalizes case and accents."""
        UpperCAmelCase__ = RoCBertBasicTokenizer(do_lower_case=_lowercase )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  " ) , ["hello", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )

    def _UpperCAmelCase ( self : Optional[Any] ):
        """Lower-casing with accent stripping disabled keeps accents."""
        UpperCAmelCase__ = RoCBertBasicTokenizer(do_lower_case=_lowercase , strip_accents=_lowercase )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  " ) , ["hällo", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )

    def _UpperCAmelCase ( self : Union[str, Any] ):
        """Lower-casing with accent stripping enabled removes accents."""
        UpperCAmelCase__ = RoCBertBasicTokenizer(do_lower_case=_lowercase , strip_accents=_lowercase )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  " ) , ["hallo", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )

    def _UpperCAmelCase ( self : str ):
        """Lower-casing default behavior also strips accents."""
        UpperCAmelCase__ = RoCBertBasicTokenizer(do_lower_case=_lowercase )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  " ) , ["hallo", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )

    def _UpperCAmelCase ( self : Any ):
        """Without lower-casing, original casing is preserved."""
        UpperCAmelCase__ = RoCBertBasicTokenizer(do_lower_case=_lowercase )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )

    def _UpperCAmelCase ( self : Union[str, Any] ):
        """No lower-casing, accent stripping disabled: casing and accents preserved."""
        UpperCAmelCase__ = RoCBertBasicTokenizer(do_lower_case=_lowercase , strip_accents=_lowercase )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )

    def _UpperCAmelCase ( self : List[Any] ):
        """No lower-casing, accent stripping enabled: accents removed, casing kept."""
        UpperCAmelCase__ = RoCBertBasicTokenizer(do_lower_case=_lowercase , strip_accents=_lowercase )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )

    def _UpperCAmelCase ( self : int ):
        """Tokens listed in `never_split` survive tokenization untouched."""
        UpperCAmelCase__ = RoCBertBasicTokenizer(do_lower_case=_lowercase , never_split=["[UNK]"] )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )

    def _UpperCAmelCase ( self : Tuple ):
        """Wordpiece tokenizer splits into subwords and falls back to [UNK]."""
        UpperCAmelCase__ = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        UpperCAmelCase__ = {}
        for i, token in enumerate(_lowercase ):
            UpperCAmelCase__ = i
        UpperCAmelCase__ = RoCBertWordpieceTokenizer(vocab=_lowercase , unk_token="[UNK]" )
        self.assertListEqual(tokenizer.tokenize("" ) , [] )
        self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
        self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )

    def _UpperCAmelCase ( self : List[Any] ):
        """Whitespace predicate accepts space/tab/CR/LF/NBSP and rejects letters."""
        self.assertTrue(_is_whitespace(" " ) )
        self.assertTrue(_is_whitespace("\t" ) )
        self.assertTrue(_is_whitespace("\r" ) )
        self.assertTrue(_is_whitespace("\n" ) )
        self.assertTrue(_is_whitespace("\u00A0" ) )
        self.assertFalse(_is_whitespace("A" ) )
        self.assertFalse(_is_whitespace("-" ) )

    def _UpperCAmelCase ( self : List[str] ):
        """Control-character predicate accepts control codes, rejects visible/whitespace chars."""
        self.assertTrue(_is_control("\u0005" ) )
        self.assertFalse(_is_control("A" ) )
        self.assertFalse(_is_control(" " ) )
        self.assertFalse(_is_control("\t" ) )
        self.assertFalse(_is_control("\r" ) )

    def _UpperCAmelCase ( self : List[Any] ):
        """Punctuation predicate accepts ASCII punctuation, rejects letters and spaces."""
        self.assertTrue(_is_punctuation("-" ) )
        self.assertTrue(_is_punctuation("$" ) )
        self.assertTrue(_is_punctuation("`" ) )
        self.assertTrue(_is_punctuation("." ) )
        self.assertFalse(_is_punctuation("A" ) )
        self.assertFalse(_is_punctuation(" " ) )

    def _UpperCAmelCase ( self : Tuple ):
        """Soft-hyphen-only input tokenizes to an empty list (clean_text behavior)."""
        UpperCAmelCase__ = self.get_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(_lowercase ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
        if self.test_rust_tokenizer:
            UpperCAmelCase__ = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(_lowercase ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )

    def _UpperCAmelCase ( self : Dict ):
        """Offset mapping is correct for text containing special characters."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                UpperCAmelCase__ = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase )
                UpperCAmelCase__ = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
                UpperCAmelCase__ = tokenizer_r.encode_plus(
                    _lowercase , return_attention_mask=_lowercase , return_token_type_ids=_lowercase , return_offsets_mapping=_lowercase , add_special_tokens=_lowercase , )
                UpperCAmelCase__ = tokenizer_r.do_lower_case if hasattr(_lowercase , "do_lower_case" ) else False
                # Expected (offset, token) pairs differ depending on lower-casing.
                UpperCAmelCase__ = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )
                self.assertEqual(
                    [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
                self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )

    def _UpperCAmelCase ( self : List[str] ):
        """Chinese characters get "##" prefixes only when CJK-splitting is disabled."""
        UpperCAmelCase__ = ["的", "人", "有"]
        UpperCAmelCase__ = "".join(_lowercase )
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                UpperCAmelCase__ = True
                UpperCAmelCase__ = self.tokenizer_class.from_pretrained(_lowercase , **_lowercase )
                UpperCAmelCase__ = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase )
                UpperCAmelCase__ = tokenizer_p.encode(_lowercase , add_special_tokens=_lowercase )
                UpperCAmelCase__ = tokenizer_r.encode(_lowercase , add_special_tokens=_lowercase )
                UpperCAmelCase__ = tokenizer_r.convert_ids_to_tokens(_lowercase )
                UpperCAmelCase__ = tokenizer_p.convert_ids_to_tokens(_lowercase )
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(_lowercase , _lowercase )
                self.assertListEqual(_lowercase , _lowercase )
                UpperCAmelCase__ = False
                UpperCAmelCase__ = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase )
                UpperCAmelCase__ = self.tokenizer_class.from_pretrained(_lowercase , **_lowercase )
                UpperCAmelCase__ = tokenizer_r.encode(_lowercase , add_special_tokens=_lowercase )
                UpperCAmelCase__ = tokenizer_p.encode(_lowercase , add_special_tokens=_lowercase )
                UpperCAmelCase__ = tokenizer_r.convert_ids_to_tokens(_lowercase )
                UpperCAmelCase__ = tokenizer_p.convert_ids_to_tokens(_lowercase )
                # it is expected that only the first Chinese character is not preceded by "##".
                UpperCAmelCase__ = [
                    F"""##{token}""" if idx != 0 else token for idx, token in enumerate(_lowercase )
                ]
                self.assertListEqual(_lowercase , _lowercase )
                self.assertListEqual(_lowercase , _lowercase )

    @slow
    def _UpperCAmelCase ( self : int ):
        """Special-token insertion: [CLS] x [SEP] and [CLS] x [SEP] y [SEP]."""
        UpperCAmelCase__ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
        UpperCAmelCase__ = tokenizer.encode("你好" , add_special_tokens=_lowercase )
        UpperCAmelCase__ = tokenizer.encode("你是谁" , add_special_tokens=_lowercase )
        UpperCAmelCase__ = tokenizer.build_inputs_with_special_tokens(_lowercase )
        UpperCAmelCase__ = tokenizer.build_inputs_with_special_tokens(_lowercase , _lowercase )
        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_a + [2]

    def _UpperCAmelCase ( self : List[Any] ):
        """`prepare_for_model` agrees with `encode_plus` for token/shape/pronunciation ids."""
        UpperCAmelCase__ = self.get_tokenizers(do_lower_case=_lowercase )
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
                UpperCAmelCase__ = "你好,你是谁"
                UpperCAmelCase__ = tokenizer.tokenize(_lowercase )
                UpperCAmelCase__ = tokenizer.convert_tokens_to_ids(_lowercase )
                UpperCAmelCase__ = tokenizer.convert_tokens_to_shape_ids(_lowercase )
                UpperCAmelCase__ = tokenizer.convert_tokens_to_pronunciation_ids(_lowercase )
                UpperCAmelCase__ = tokenizer.prepare_for_model(
                    _lowercase , _lowercase , _lowercase , add_special_tokens=_lowercase )
                UpperCAmelCase__ = tokenizer.encode_plus(_lowercase , add_special_tokens=_lowercase )
                self.assertEqual(_lowercase , _lowercase )
| 277 |
from __future__ import annotations
class Matrix:
    """A 2-D matrix of ints/floats supporting determinant, inverse, adjugate,
    row/column insertion, and the +, -, *, **, unary - operators.

    Rows are stored as a list of lists in ``self.rows``; an empty matrix is
    represented by ``[]``.  Scalar multiplication truncates results with
    ``int(...)``, matching the original implementation.
    """

    def __init__(self, rows: "list[list[int]]") -> None:
        """Validate and store the rows; raise TypeError on malformed input."""
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    def columns(self) -> "list[list[int]]":
        """Return the columns of the matrix (i.e. the transpose's rows)."""
        if not self.rows:
            return []
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        # Guard the empty matrix so `order`, `is_square` and the (0, 0)
        # determinant branch are reachable instead of raising IndexError.
        return len(self.rows[0]) if self.rows else 0

    @property
    def order(self) -> "tuple[int, int]":
        """(num_rows, num_columns)."""
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> "Matrix":
        """Return the identity matrix of the same row count."""
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self) -> int:
        """Determinant via cofactor expansion along the first row.

        Non-square matrices return 0; the empty matrix has determinant 1.
        """
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        return sum(
            self.rows[0][column] * self.cofactors().rows[0][column]
            for column in range(self.num_columns)
        )

    def is_invertable(self) -> bool:
        """A matrix is invertible iff its determinant is non-zero."""
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        """Determinant of the submatrix with `row` and `column` removed."""
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        """Signed minor: negate when (row + column) is odd."""
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> "Matrix":
        """Matrix of minors."""
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self) -> "Matrix":
        """Matrix of cofactors (minors with a checkerboard sign pattern)."""
        # Compute the minors once instead of once per element.
        minors = self.minors()
        return Matrix(
            [
                [
                    minors.rows[row][column]
                    if (row + column) % 2 == 0
                    else minors.rows[row][column] * -1
                    for column in range(minors.num_columns)
                ]
                for row in range(minors.num_rows)
            ]
        )

    def adjugate(self) -> "Matrix":
        """Transpose of the cofactor matrix."""
        cofactors = self.cofactors()
        values = [
            [cofactors.rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> "Matrix":
        """Inverse = adjugate / determinant (entries truncated by scalar `*`)."""
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)

    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        # Format every row as "[a. b. c.]"; the old num_rows == 1 special case
        # joined the *characters* of str(row) and is handled correctly by the
        # general branch below.
        if self.num_rows == 0:
            return "[]"
        return (
            "["
            + "\n ".join(
                "[" + ". ".join(str(value) for value in row) + ".]"
                for row in self.rows
            )
            + "]"
        )

    def add_row(self, row: "list[int]", position: "int | None" = None) -> None:
        """Append `row`, or insert it at `position` when given."""
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix"
            )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: "list[int]", position: "int | None" = None) -> None:
        """Append `column`, or insert it at `position` when given."""
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats"
        )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix"
            )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other

    def __neg__(self) -> "Matrix":
        return self * -1

    def __add__(self, other: "Matrix") -> "Matrix":
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other: "Matrix") -> "Matrix":
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other: "Matrix | int | float") -> "Matrix":
        """Scalar multiplication (entries truncated with int) or matrix product."""
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows]
            )
        if isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        raise TypeError(
            "A Matrix can only be multiplied by an int, float, or another matrix"
        )

    def __pow__(self, other: int) -> "Matrix":
        """Repeated self-multiplication; 0 -> identity, negative -> power of inverse."""
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power"
            )
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: "list[int]", column: "list[int]") -> int:
        """Sum of pairwise products of two equal-length vectors."""
        return sum(row[i] * column[i] for i in range(len(row)))
if __name__ == "__main__":
    # Run this module's embedded doctests when executed as a script.
    import doctest
    doctest.testmod()
| 277 | 1 |
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def snake_case (UpperCamelCase : List[str] ):
'''simple docstring'''
lowerCamelCase__ = []
for line in lines:
lowerCamelCase__ = re.sub(r"""#.*""" , """""" , UpperCamelCase ) # remove comments
if line:
filtered_lines.append(UpperCamelCase )
lowerCamelCase__ = """\n""".join(UpperCamelCase )
# Make a hash from all this code
lowerCamelCase__ = full_str.encode("""utf-8""" )
return shaaaa(UpperCamelCase ).hexdigest()
# get importable module names and hash for caching
# Map: packaged builder name -> (importable module name, hash of its source).
# The hash is used as a cache key that changes only when the builder code changes.
_PACKAGED_DATASETS_MODULES = {
    "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}

# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
    ".csv": ("csv", {}),
    ".tsv": ("csv", {"sep": "\t"}),
    ".json": ("json", {}),
    ".jsonl": ("json", {}),
    ".parquet": ("parquet", {}),
    ".arrow": ("arrow", {}),
    ".txt": ("text", {}),
}
# Folder-based builders accept their media extensions in any case.
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})

# Modules whose loaders support an accompanying metadata file.
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}

# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)

_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
| 165 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Map: checkpoint name -> URL of its config.json, used for pretrained resolution.
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}
class Data2VecVisionConfig(PretrainedConfig):
    """Configuration for a Data2Vec vision model.

    Stores the transformer hyper-parameters (hidden size, layer/head counts,
    dropout rates), the ViT-style patch-embedding settings (image/patch size,
    channels), the relative/absolute position-embedding switches, and the
    decode/auxiliary head settings used for semantic segmentation.
    Extra kwargs are forwarded to `PretrainedConfig`.
    """

    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig(OnnxConfig):
    """ONNX export configuration for Data2Vec vision models."""

    # Minimum torch version known to export this architecture correctly.
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Export input spec: `pixel_values` with fully dynamic axes."""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance when validating exported outputs against PyTorch.
        return 1e-4
| 165 | 1 |
'''simple docstring'''
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def parse_args():
    """Parse command-line arguments for the Stable Diffusion generation script.

    Returns:
        argparse.Namespace with fields: pretrained_model_name_or_path (str,
        required), caption (str), images_num (int), seed (int), cuda_id (int).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m",
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "-c",
        "--caption",
        type=str,
        default="robotic cat with wings",
        help="Text used to generate images.",
    )
    parser.add_argument(
        "-n",
        "--images_num",
        type=int,
        default=4,
        help="How much images to generate.",
    )
    parser.add_argument(
        "-s",
        "--seed",
        type=int,
        default=42,
        help="Seed for random process.",
    )
    parser.add_argument(
        "-ci",
        "--cuda_id",
        type=int,
        default=0,
        help="cuda_id.",
    )
    args = parser.parse_args()
    return args
def image_grid(imgs, rows, cols):
    """Paste `rows * cols` PIL images into one grid image, row-major.

    Args:
        imgs: list of PIL images; assumes all share the same size — TODO confirm.
        rows: number of grid rows.
        cols: number of grid columns.

    Returns:
        A new RGB PIL image of size (cols * w, rows * h).

    Raises:
        ValueError: if len(imgs) != rows * cols.
    """
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")

    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    for i, img in enumerate(imgs):
        # Row-major placement: column index i % cols, row index i // cols.
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
def generate_images(
    pipeline,
    prompt="robotic cat with wings",
    guidance_scale=7.5,
    num_inference_steps=50,
    num_images_per_prompt=1,
    seed=42,
):
    """Run a Stable Diffusion pipeline and assemble the outputs into a grid.

    Args:
        pipeline: a StableDiffusionPipeline (or compatible callable).
        prompt: text prompt for generation.
        guidance_scale: classifier-free guidance strength.
        num_inference_steps: denoising steps.
        num_images_per_prompt: images to generate; the grid uses
            int(sqrt(n)) rows, so square counts give a square grid.
        seed: seed for the torch generator (deterministic output).

    Returns:
        (grid, images): the combined grid image and the list of images.
    """
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images
args = parse_args()

# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)

# Disable the safety checker: pass images through unchanged, flag nothing.
pipeline.safety_checker = lambda images, clip_input: (images, False)

if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    # A quantized UNet (Intel Neural Compressor) is available — use it on CPU.
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)

grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)

# Save the combined grid next to the model, named after the caption.
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
| 624 |
'''simple docstring'''
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester(object):
    """Reusable harness exercising a `PretrainedConfig` subclass.

    `parent` is the unittest.TestCase whose assert* methods are used; extra
    kwargs become `inputs_dict`, forwarded to every config instantiation.
    """

    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties

    def create_and_test_config_common_properties(self):
        """Common properties must exist as getters, as setters, and as init kwargs."""
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )

        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")

        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def create_and_test_config_to_json_string(self):
        """Every input kwarg must round-trip through to_json_string."""
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        """to_json_file / from_json_file must preserve the full config dict."""
        config_first = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        """save_pretrained / from_pretrained must preserve the full config dict."""
        config_first = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        """from_pretrained must resolve a config saved under a subfolder."""
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_configs_path = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_configs_path)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        """num_labels must resize id2label/label2id at init time and on assignment."""
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)

        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        """Non-composite configs must be constructible with no arguments."""
        if self.config_class.is_composition:
            # Composite configs require sub-configs, so skip them.
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)

    def check_config_arguments_init(self):
        """Every key of config_common_kwargs must be honored by the constructor."""
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    # NOTE(review): assumes config_common_kwargs requests
                    # torch.float16 — confirm against config_common_kwargs.
                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))

        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")

    def run_common_tests(self):
        """Execute the full battery of common config checks."""
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
| 624 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.