code stringlengths 81 54k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
lowerCAmelCase : Tuple = logging.get_logger(__name__)
lowerCAmelCase : Union[str, Any] = {"""vocab_file""": """spiece.model"""}
lowerCAmelCase : Optional[Any] = {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
}
}
# TODO(PVP) - this should be removed in Transformers v5
lowerCAmelCase : List[str] = {
"""t5-small""": 5_12,
"""t5-base""": 5_12,
"""t5-large""": 5_12,
"""t5-3b""": 5_12,
"""t5-11b""": 5_12,
}
lowerCAmelCase : Tuple = """▁"""
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = ["input_ids", "attention_mask"]
def __init__( self , snake_case__ , snake_case__="</s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__=100 , snake_case__=None , snake_case__ = None , snake_case__=True , **snake_case__ , ):
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
_lowerCAmelCase : Optional[Any] = [F'<extra_id_{i}>' for i in range(snake_case__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
_lowerCAmelCase : Optional[int] = len(set(filter(lambda snake_case__ : bool('extra_id' in str(snake_case__ ) ) , snake_case__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'
' tokens' )
if legacy:
logger.warning_once(
F'You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'
' read the related pull request available at https://github.com/huggingface/transformers/pull/24565' )
_lowerCAmelCase : List[Any] = legacy
_lowerCAmelCase : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , extra_ids=snake_case__ , additional_special_tokens=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , legacy=snake_case__ , **snake_case__ , )
_lowerCAmelCase : str = vocab_file
_lowerCAmelCase : List[str] = extra_ids
_lowerCAmelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case__ )
@staticmethod
def a ( snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
_lowerCAmelCase : List[str] = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'This tokenizer was incorrectly instantiated with a model max length of'
F' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'
' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
F' {pretrained_model_name_or_path} automatically truncating your input to'
F' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'
F' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'
' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
' instantiate this tokenizer with `model_max_length` set to your preferred value.' , snake_case__ , )
return max_model_length
@property
def a ( self ):
'''simple docstring'''
return self.sp_model.get_piece_size() + self._extra_ids
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def a ( self , snake_case__ , snake_case__ = None , snake_case__ = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(snake_case__ )) + [1]
return ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ )) + [1]
def a ( self ):
'''simple docstring'''
return list(
set(filter(lambda snake_case__ : bool(re.search(R'<extra_id_\d+>' , snake_case__ ) ) is not None , self.additional_special_tokens ) ) )
def a ( self ):
'''simple docstring'''
return [self._convert_token_to_id(snake_case__ ) for token in self.get_sentinel_tokens()]
def a ( self , snake_case__ ):
'''simple docstring'''
if len(snake_case__ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
' eos tokens being added.' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
_lowerCAmelCase : Dict = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self._add_eos_if_not_present(snake_case__ )
if token_ids_a is None:
return token_ids_a
else:
_lowerCAmelCase : int = self._add_eos_if_not_present(snake_case__ )
return token_ids_a + token_ids_a
def __getstate__( self ):
'''simple docstring'''
_lowerCAmelCase : Any = self.__dict__.copy()
_lowerCAmelCase : str = None
return state
def __setstate__( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : int = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
_lowerCAmelCase : Union[str, Any] = {}
_lowerCAmelCase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def a ( self , snake_case__ , **snake_case__ ):
'''simple docstring'''
if not self.legacy:
_lowerCAmelCase : Optional[int] = SPIECE_UNDERLINE + text.replace(snake_case__ , ' ' )
return super().tokenize(snake_case__ , **snake_case__ )
def a ( self , snake_case__ , **snake_case__ ):
'''simple docstring'''
if not self.legacy:
_lowerCAmelCase : Dict = text.startswith(snake_case__ )
if is_first:
_lowerCAmelCase : Optional[int] = text[1:]
_lowerCAmelCase : str = self.sp_model.encode(snake_case__ , out_type=snake_case__ )
if not self.legacy and not is_first and not text.startswith(' ' ) and tokens[0].startswith(snake_case__ ):
_lowerCAmelCase : List[str] = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def a ( self , snake_case__ ):
'''simple docstring'''
if token.startswith('<extra_id_' ):
_lowerCAmelCase : Union[str, Any] = re.match(R'<extra_id_(\d+)>' , snake_case__ )
_lowerCAmelCase : Any = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(snake_case__ )
def a ( self , snake_case__ ):
'''simple docstring'''
if index < self.sp_model.get_piece_size():
_lowerCAmelCase : Optional[Any] = self.sp_model.IdToPiece(snake_case__ )
else:
_lowerCAmelCase : List[Any] = F'<extra_id_{self.vocab_size - 1 - index}>'
return token
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = []
_lowerCAmelCase : int = ''
_lowerCAmelCase : List[str] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(snake_case__ ) + token
_lowerCAmelCase : Optional[int] = True
_lowerCAmelCase : Optional[int] = []
else:
current_sub_tokens.append(snake_case__ )
_lowerCAmelCase : str = False
out_string += self.sp_model.decode(snake_case__ )
return out_string.strip()
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
if not os.path.isdir(snake_case__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCAmelCase : Optional[int] = os.path.join(
snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case__ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case__ , 'wb' ) as fi:
_lowerCAmelCase : List[str] = self.sp_model.serialized_model_proto()
fi.write(snake_case__ )
return (out_vocab_file,)
| 630 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase : str = {
"""EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = "gptj"
__magic_name__ = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , snake_case__=5_0400 , snake_case__=2048 , snake_case__=4096 , snake_case__=28 , snake_case__=16 , snake_case__=64 , snake_case__=None , snake_case__="gelu_new" , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=1E-5 , snake_case__=0.02 , snake_case__=True , snake_case__=5_0256 , snake_case__=5_0256 , snake_case__=False , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : int = vocab_size
_lowerCAmelCase : Optional[int] = n_positions
_lowerCAmelCase : Optional[int] = n_embd
_lowerCAmelCase : Optional[int] = n_layer
_lowerCAmelCase : str = n_head
_lowerCAmelCase : Tuple = n_inner
_lowerCAmelCase : Tuple = rotary_dim
_lowerCAmelCase : Optional[int] = activation_function
_lowerCAmelCase : Any = resid_pdrop
_lowerCAmelCase : List[str] = embd_pdrop
_lowerCAmelCase : int = attn_pdrop
_lowerCAmelCase : Any = layer_norm_epsilon
_lowerCAmelCase : Optional[int] = initializer_range
_lowerCAmelCase : List[str] = use_cache
_lowerCAmelCase : Dict = bos_token_id
_lowerCAmelCase : Any = eos_token_id
super().__init__(
bos_token_id=snake_case__ , eos_token_id=snake_case__ , tie_word_embeddings=snake_case__ , **snake_case__ )
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ = "default" , snake_case__ = None , snake_case__ = False , ):
'''simple docstring'''
super().__init__(snake_case__ , task=snake_case__ , patching_specs=snake_case__ , use_past=snake_case__ )
if not getattr(self._config , 'pad_token_id' , snake_case__ ):
# TODO: how to do that better?
_lowerCAmelCase : Any = 0
@property
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : str = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
if self.use_past:
self.fill_with_past_key_values_(snake_case__ , direction='inputs' )
_lowerCAmelCase : int = {0: 'batch', 1: 'past_sequence + sequence'}
else:
_lowerCAmelCase : int = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def a ( self ):
'''simple docstring'''
return self._config.n_layer
@property
def a ( self ):
'''simple docstring'''
return self._config.n_head
def a ( self , snake_case__ , snake_case__ = -1 , snake_case__ = -1 , snake_case__ = False , snake_case__ = None , ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = super(snake_case__ , self ).generate_dummy_inputs(
snake_case__ , batch_size=snake_case__ , seq_length=snake_case__ , is_pair=snake_case__ , framework=snake_case__ )
# We need to order the input in the way they appears in the forward()
_lowerCAmelCase : Any = OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
_lowerCAmelCase : Any = seqlen + 2
_lowerCAmelCase : Optional[int] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
_lowerCAmelCase : Tuple = [
(torch.zeros(snake_case__ ), torch.zeros(snake_case__ )) for _ in range(self.num_layers )
]
_lowerCAmelCase : Tuple = common_inputs['attention_mask']
if self.use_past:
_lowerCAmelCase : Any = ordered_inputs['attention_mask'].dtype
_lowerCAmelCase : Union[str, Any] = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(snake_case__ , snake_case__ , dtype=snake_case__ )] , dim=1 )
return ordered_inputs
@property
def a ( self ):
'''simple docstring'''
return 13
| 630 | 1 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = ["image_processor", "tokenizer"]
__magic_name__ = "CLIPImageProcessor"
__magic_name__ = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self , snake_case__=None , snake_case__=None , **snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , snake_case__ , )
_lowerCAmelCase : str = kwargs.pop('feature_extractor' )
_lowerCAmelCase : int = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(snake_case__ , snake_case__ )
def __call__( self , snake_case__=None , snake_case__=None , snake_case__=None , **snake_case__ ):
'''simple docstring'''
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
_lowerCAmelCase : Union[str, Any] = self.tokenizer(snake_case__ , return_tensors=snake_case__ , **snake_case__ )
if images is not None:
_lowerCAmelCase : int = self.image_processor(snake_case__ , return_tensors=snake_case__ , **snake_case__ )
if text is not None and images is not None:
_lowerCAmelCase : Any = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**snake_case__ ) , tensor_type=snake_case__ )
def a ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def a ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@property
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.tokenizer.model_input_names
_lowerCAmelCase : Any = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def a ( self ):
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , snake_case__ , )
return self.image_processor_class
@property
def a ( self ):
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , snake_case__ , )
return self.image_processor
| 630 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase : Any = {
"""configuration_x_clip""": [
"""XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XCLIPConfig""",
"""XCLIPTextConfig""",
"""XCLIPVisionConfig""",
],
"""processing_x_clip""": ["""XCLIPProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[Any] = [
"""XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XCLIPModel""",
"""XCLIPPreTrainedModel""",
"""XCLIPTextModel""",
"""XCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
lowerCAmelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 630 | 1 |
'''simple docstring'''
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']}
return Dataset.from_dict(snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = self._create_example_records()
_lowerCAmelCase : Dict = Dataset.from_list(snake_case__ )
self.assertListEqual(dset.column_names , ['col_1', 'col_2'] )
for i, r in enumerate(snake_case__ ):
self.assertDictEqual(snake_case__ , example_records[i] )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = self._create_example_records()
_lowerCAmelCase : List[Any] = Dataset.from_list(snake_case__ )
_lowerCAmelCase : List[str] = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def a ( self ): # checks what happens with missing columns
'''simple docstring'''
_lowerCAmelCase : int = [{'col_1': 1}, {'col_2': 'x'}]
_lowerCAmelCase : List[str] = Dataset.from_list(snake_case__ )
self.assertDictEqual(dset[0] , {'col_1': 1} )
self.assertDictEqual(dset[1] , {'col_1': None} ) # NB: first record is used for columns
def a ( self ): # checks if the type can be inferred from the second record
'''simple docstring'''
_lowerCAmelCase : Dict = [{'col_1': []}, {'col_1': [1, 2]}]
_lowerCAmelCase : Optional[Any] = Dataset.from_list(snake_case__ )
self.assertEqual(dset.info.features['col_1'] , Sequence(Value('int64' ) ) )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = Dataset.from_list([] )
self.assertEqual(len(snake_case__ ) , 0 )
self.assertListEqual(dset.column_names , [] )
| 630 |
'''simple docstring'''
import math
from datetime import datetime, timedelta
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : Optional[Any] = year % 1_9
_lowerCAmelCase : Any = year % 4
_lowerCAmelCase : Optional[int] = year % 7
_lowerCAmelCase : int = math.floor(year / 1_0_0 )
_lowerCAmelCase : Dict = math.floor((1_3 + 8 * leap_day_inhibits) / 2_5 )
_lowerCAmelCase : Optional[Any] = leap_day_inhibits / 4
_lowerCAmelCase : Dict = (
1_5 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 3_0
_lowerCAmelCase : List[Any] = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
_lowerCAmelCase : Dict = (1_9 * metonic_cycle + secular_moon_shift) % 3_0
# PHM -> Paschal Full Moon
_lowerCAmelCase : Union[str, Any] = (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
if days_to_add == 2_9 and days_from_phm_to_sunday == 6:
return datetime(_A , 4 , 1_9 )
elif days_to_add == 2_8 and days_from_phm_to_sunday == 6:
return datetime(_A , 4 , 1_8 )
else:
return datetime(_A , 3 , 2_2 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (19_94, 20_00, 20_10, 20_21, 20_23):
lowerCAmelCase : List[str] = """will be""" if year > datetime.now().year else """was"""
print(F'''Easter in {year} {tense} {gauss_easter(year)}''')
| 630 | 1 |
'''simple docstring'''
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : Tuple = [0] * len(_A )
_lowerCAmelCase : Dict = []
_lowerCAmelCase : List[Any] = [1] * len(_A )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(_A ) ):
if indegree[i] == 0:
queue.append(_A )
while queue:
_lowerCAmelCase : List[str] = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
_lowerCAmelCase : List[Any] = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(_A )
print(max(_A ) )
# Adjacency list of Graph
lowerCAmelCase : Optional[int] = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| 630 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = [10, 20, 30, 40, 50, 60]
_lowerCAmelCase : Union[str, Any] = [2, 4, 6, 8, 10, 12]
_lowerCAmelCase : Dict = 100
self.assertEqual(kp.calc_profit(snake_case__ , snake_case__ , snake_case__ ) , 210 )
def a ( self ):
'''simple docstring'''
self.assertRaisesRegex(snake_case__ , 'max_weight must greater than zero.' )
def a ( self ):
'''simple docstring'''
self.assertRaisesRegex(snake_case__ , 'Weight can not be negative.' )
def a ( self ):
'''simple docstring'''
self.assertRaisesRegex(snake_case__ , 'Profit can not be negative.' )
def a ( self ):
'''simple docstring'''
self.assertRaisesRegex(snake_case__ , 'max_weight must greater than zero.' )
def a ( self ):
'''simple docstring'''
self.assertRaisesRegex(
snake_case__ , 'The length of profit and weight must be same.' )
if __name__ == "__main__":
unittest.main()
| 630 | 1 |
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
lowerCAmelCase : List[str] = """\
Text data.
Second line of data."""
lowerCAmelCase : Any = """file"""
@pytest.fixture(scope='session' )
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : int = tmp_path_factory.mktemp('data' ) / (FILE_PATH + '.zstd')
_lowerCAmelCase : Tuple = bytes(_A , 'utf-8' )
with zstd.open(_A , 'wb' ) as f:
f.write(_A )
return path
@pytest.fixture
def lowercase (_A ):
"""simple docstring"""
with open(os.path.join(tmpfs.local_root_dir , _A ) , 'w' ) as f:
f.write(_A )
return FILE_PATH
@pytest.mark.parametrize('compression_format' , ['gzip', 'xz', 'zstd'] )
def lowercase (_A , _A , _A , _A , _A , _A ):
"""simple docstring"""
_lowerCAmelCase : List[Any] = {'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_path}
_lowerCAmelCase : Any = input_paths[compression_format]
_lowerCAmelCase : Union[str, Any] = tmp_path / 'cache'
_lowerCAmelCase : List[Any] = DownloadConfig(cache_dir=_A , extract_compressed_file=_A )
_lowerCAmelCase : str = cached_path(_A , download_config=_A )
with open(_A ) as f:
_lowerCAmelCase : List[Any] = f.read()
with open(_A ) as f:
_lowerCAmelCase : List[str] = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('default_extracted' , [True, False] )
@pytest.mark.parametrize('default_cache_dir' , [True, False] )
def lowercase (_A , _A , _A , _A , _A ):
"""simple docstring"""
_lowerCAmelCase : int = 'custom_cache'
_lowerCAmelCase : str = 'custom_extracted_dir'
_lowerCAmelCase : Optional[Any] = tmp_path / 'custom_extracted_path'
if default_extracted:
_lowerCAmelCase : List[str] = ('downloads' if default_cache_dir else custom_cache_dir, 'extracted')
else:
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_DIR' , _A )
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(_A ) )
_lowerCAmelCase : List[str] = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
_lowerCAmelCase : List[Any] = xz_file
_lowerCAmelCase : Dict = (
DownloadConfig(extract_compressed_file=_A )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_A )
)
_lowerCAmelCase : Any = cached_path(_A , download_config=_A )
assert Path(_A ).parent.parts[-2:] == expected
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : List[str] = str(Path(_A ).resolve() )
assert cached_path(_A ) == text_file
# relative path
_lowerCAmelCase : List[str] = str(Path(_A ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(_A ) == text_file
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : Dict = str(tmp_path.resolve() / '__missing_file__.txt' )
with pytest.raises(_A ):
cached_path(_A )
# relative path
_lowerCAmelCase : Tuple = './__missing_file__.txt'
with pytest.raises(_A ):
cached_path(_A )
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : str = get_from_cache(f'tmp://{tmpfs_file}' )
with open(_A ) as f:
_lowerCAmelCase : Union[str, Any] = f.read()
assert output_file_content == FILE_CONTENT
@patch('datasets.config.HF_DATASETS_OFFLINE' , _A )
def lowercase ():
"""simple docstring"""
with pytest.raises(_A ):
cached_path('https://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , _A )
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : int = tmp_path_factory.mktemp('data' ) / 'file.html'
with pytest.raises(_A ):
http_get('https://huggingface.co' , temp_file=_A )
with pytest.raises(_A ):
http_head('https://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , _A )
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : int = tmp_path_factory.mktemp('data' ) / 'file.html'
with pytest.raises(_A ):
ftp_get('ftp://huggingface.co' , temp_file=_A )
with pytest.raises(_A ):
ftp_head('ftp://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , _A )
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'file.html'
with pytest.raises(_A ):
fsspec_get('s3://huggingface.co' , temp_file=_A )
with pytest.raises(_A ):
fsspec_head('s3://huggingface.co' )
| 630 |
'''simple docstring'''
def lowercase (_A , _A ):
"""simple docstring"""
_lowerCAmelCase : Optional[int] = (boundary[1] - boundary[0]) / steps
_lowerCAmelCase : Any = boundary[0]
_lowerCAmelCase : List[str] = boundary[1]
_lowerCAmelCase : Tuple = make_points(_A , _A , _A )
_lowerCAmelCase : Tuple = 0.0
y += (h / 2.0) * f(_A )
for i in x_i:
# print(i)
y += h * f(_A )
y += (h / 2.0) * f(_A )
return y
def lowercase (_A , _A , _A ):
"""simple docstring"""
_lowerCAmelCase : Tuple = a + h
while x < (b - h):
yield x
_lowerCAmelCase : Any = x + h
def lowercase (_A ): # enter your function here
"""simple docstring"""
_lowerCAmelCase : int = (x - 0) * (x - 0)
return y
def lowercase ():
"""simple docstring"""
_lowerCAmelCase : Optional[Any] = 0.0 # Lower bound of integration
_lowerCAmelCase : Dict = 1.0 # Upper bound of integration
_lowerCAmelCase : Optional[Any] = 10.0 # define number of steps or resolution
_lowerCAmelCase : Optional[int] = [a, b] # define boundary of integration
_lowerCAmelCase : List[Any] = method_a(_A , _A )
print(f'y = {y}' )
if __name__ == "__main__":
main()
| 630 | 1 |
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
lowerCAmelCase : Tuple = False
lowerCAmelCase : str = True
lowerCAmelCase : List[Any] = False
if __name__ == "__main__":
lowerCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument(
"""--repo_path""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the architecture.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
lowerCAmelCase : Optional[int] = parser.parse_args()
lowerCAmelCase : int = {
"""image_size""": """sample_size""",
"""num_res_blocks""": """layers_per_block""",
"""block_channels""": """block_out_channels""",
"""down_blocks""": """down_block_types""",
"""up_blocks""": """up_block_types""",
"""downscale_freq_shift""": """freq_shift""",
"""resnet_num_groups""": """norm_num_groups""",
"""resnet_act_fn""": """act_fn""",
"""resnet_eps""": """norm_eps""",
"""num_head_channels""": """attention_head_dim""",
}
lowerCAmelCase : int = {
"""time_steps""": """time_proj""",
"""mid""": """mid_block""",
"""downsample_blocks""": """down_blocks""",
"""upsample_blocks""": """up_blocks""",
}
lowerCAmelCase : Optional[Any] = """""" if has_file(args.repo_path, """config.json""") else """unet"""
with open(os.path.join(args.repo_path, subfolder, """config.json"""), """r""", encoding="""utf-8""") as reader:
lowerCAmelCase : int = reader.read()
lowerCAmelCase : List[str] = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, """config.json"""):
lowerCAmelCase : str = UNetaDModel(**config)
else:
lowerCAmelCase : Union[str, Any] = UNetaDConditionModel if """ldm-text2im-large-256""" in args.repo_path else UNetaDModel
lowerCAmelCase : Dict = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
lowerCAmelCase : Union[str, Any] = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
lowerCAmelCase : str = config[key]
del config[key]
lowerCAmelCase : Optional[int] = [k.replace("""UNetRes""", """""") for k in config["""down_block_types"""]]
lowerCAmelCase : Dict = [k.replace("""UNetRes""", """""") for k in config["""up_block_types"""]]
if do_only_weights:
lowerCAmelCase : Tuple = torch.load(os.path.join(args.repo_path, subfolder, """diffusion_pytorch_model.bin"""))
lowerCAmelCase : str = {}
for param_key, param_value in state_dict.items():
if param_key.endswith(""".op.bias""") or param_key.endswith(""".op.weight"""):
continue
lowerCAmelCase : str = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split(""".""")[0] == key:
lowerCAmelCase : Dict = param_value
lowerCAmelCase : Tuple = True
if not has_changed:
lowerCAmelCase : Tuple = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 630 |
"""Lazy import structure for the TrOCR model (configuration, processing, modeling)."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)


# Submodule name -> public symbols it exports; consumed by `_LazyModule` below.
# NOTE(review): the mangled original bound this dict (and the torch-only list,
# and the lazy module itself) to throwaway `lowerCAmelCase` names, so the lazy
# machinery never received them. Bindings restored.
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch unavailable: the modeling symbols are simply omitted.
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules import on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 630 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """ConfigTester specialization: checks a freshly built MobileViTV2 config
    exposes the `width_multiplier` attribute."""

    def a ( self ):
        """Build the config from `self.inputs_dict` and assert `width_multiplier`.

        NOTE(review): the original asserted on the undefined name `snake_case__`;
        the intended target is the config instance created on the previous line.
        """
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , 'width_multiplier' ) )
class UpperCamelCase__ :
    """Test helper that builds MobileViTV2 configs/inputs and runs create-and-check assertions.

    NOTE(review): this class was machine-mangled and cannot run as-is —
    `__init__` and the `create_and_check_*` methods declare the parameter name
    `snake_case__` several times each (a SyntaxError), and their bodies read
    names the signatures no longer bind (`parent`, `batch_size`,
    `pixel_values`, ...). Likewise every assignment target was renamed
    `_lowerCAmelCase`, so later reads (`config`, `result`, ...) are unbound.
    Original upstream names must be restored before use; code is left
    unchanged here (indentation restored) and only annotated.
    """

    def __init__( self , snake_case__ , snake_case__=13 , snake_case__=64 , snake_case__=2 , snake_case__=3 , snake_case__="swish" , snake_case__=3 , snake_case__=32 , snake_case__=0.1 , snake_case__=0.02 , snake_case__=True , snake_case__=True , snake_case__=10 , snake_case__=None , snake_case__=0.25 , snake_case__=0.0 , snake_case__=0.0 , ):
        '''simple docstring'''
        _lowerCAmelCase : Tuple = parent
        _lowerCAmelCase : Optional[int] = batch_size
        _lowerCAmelCase : List[Any] = image_size
        _lowerCAmelCase : List[Any] = patch_size
        _lowerCAmelCase : List[str] = num_channels
        # Hidden size derived from the width multiplier, rounded to a multiple of 8.
        _lowerCAmelCase : Optional[Any] = make_divisible(512 * width_multiplier , divisor=8 )
        _lowerCAmelCase : Optional[Any] = hidden_act
        _lowerCAmelCase : List[Any] = conv_kernel_size
        _lowerCAmelCase : Optional[Any] = output_stride
        _lowerCAmelCase : List[Any] = classifier_dropout_prob
        _lowerCAmelCase : str = use_labels
        _lowerCAmelCase : List[str] = is_training
        _lowerCAmelCase : Optional[int] = num_labels
        _lowerCAmelCase : List[str] = initializer_range
        _lowerCAmelCase : str = scope
        _lowerCAmelCase : Any = width_multiplier
        _lowerCAmelCase : Union[str, Any] = ffn_dropout
        _lowerCAmelCase : Optional[int] = attn_dropout

    def a ( self ):
        '''Prepare (config, pixel_values, labels, pixel_labels) for the tests.'''
        _lowerCAmelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        _lowerCAmelCase : Optional[Any] = None
        _lowerCAmelCase : Dict = None
        if self.use_labels:
            _lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
            _lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        _lowerCAmelCase : Optional[int] = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def a ( self ):
        '''Build a MobileViTV2 config from the tester's hyper-parameters.'''
        return MobileViTVaConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )

    def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
        '''Check the base model's last_hidden_state shape.'''
        _lowerCAmelCase : Any = MobileViTVaModel(config=snake_case__ )
        model.to(snake_case__ )
        model.eval()
        _lowerCAmelCase : str = model(snake_case__ )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )

    def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
        '''Check the image-classification head's logits shape.'''
        _lowerCAmelCase : Optional[int] = self.num_labels
        _lowerCAmelCase : List[Any] = MobileViTVaForImageClassification(snake_case__ )
        model.to(snake_case__ )
        model.eval()
        _lowerCAmelCase : int = model(snake_case__ , labels=snake_case__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
        '''Check the semantic-segmentation head's logits shape, with and without labels.'''
        _lowerCAmelCase : List[Any] = self.num_labels
        _lowerCAmelCase : Optional[int] = MobileViTVaForSemanticSegmentation(snake_case__ )
        model.to(snake_case__ )
        model.eval()
        _lowerCAmelCase : Dict = model(snake_case__ )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
        _lowerCAmelCase : Any = model(snake_case__ , labels=snake_case__ )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )

    def a ( self ):
        '''Split prepared inputs into (config, inputs_dict) for the common tests.'''
        _lowerCAmelCase : int = self.prepare_config_and_inputs()
        _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = config_and_inputs
        _lowerCAmelCase : Tuple = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
    """Common model/pipeline test suite for MobileViTV2.

    NOTE(review): machine-mangled — both mixin bases were renamed
    `SCREAMING_SNAKE_CASE_` (upstream: ModelTesterMixin, PipelineTesterMixin),
    the six class attributes all shadow one another as `__magic_name__`
    (upstream: all_model_classes, pipeline_model_mapping, and four boolean
    test flags), and several bodies read `snake_case__` where it is unbound
    (e.g. `from_pretrained(snake_case__ )` should receive `model_name`).
    `MobileViTVaModelTester`/`MobileViTVaConfigTester` are also not defined
    under those names in this file. Code left unchanged (indentation
    restored); annotated only.
    """

    __magic_name__ = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    __magic_name__ = (
        {
            "feature-extraction": MobileViTVaModel,
            "image-classification": MobileViTVaForImageClassification,
            "image-segmentation": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    __magic_name__ = False
    __magic_name__ = False
    __magic_name__ = False
    __magic_name__ = False

    def a ( self ):
        '''Build the model tester and config tester used by the common tests.'''
        _lowerCAmelCase : int = MobileViTVaModelTester(self )
        _lowerCAmelCase : Dict = MobileViTVaConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ )

    def a ( self ):
        '''Run the shared configuration tests.'''
        self.config_tester.run_common_tests()

    @unittest.skip(reason='MobileViTV2 does not use inputs_embeds' )
    def a ( self ):
        '''simple docstring'''
        pass

    @unittest.skip(reason='MobileViTV2 does not support input and output embeddings' )
    def a ( self ):
        '''simple docstring'''
        pass

    @unittest.skip(reason='MobileViTV2 does not output attentions' )
    def a ( self ):
        '''simple docstring'''
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.' )
    def a ( self ):
        '''simple docstring'''
        pass

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def a ( self ):
        '''simple docstring'''
        pass

    def a ( self ):
        '''Check the forward signature starts with `pixel_values`.'''
        _lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _lowerCAmelCase : str = model_class(snake_case__ )
            _lowerCAmelCase : List[str] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _lowerCAmelCase : int = [*signature.parameters.keys()]
            _lowerCAmelCase : Any = ['pixel_values']
            self.assertListEqual(arg_names[:1] , snake_case__ )

    def a ( self ):
        '''Smoke-test the base model shape check.'''
        _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*snake_case__ )

    def a ( self ):
        '''Verify hidden-state count and the successive /2 spatial downsampling.'''
        def check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ):
            _lowerCAmelCase : Dict = model_class(snake_case__ )
            model.to(snake_case__ )
            model.eval()
            with torch.no_grad():
                _lowerCAmelCase : Optional[Any] = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
            _lowerCAmelCase : List[str] = outputs.hidden_states
            _lowerCAmelCase : List[str] = 5
            self.assertEqual(len(snake_case__ ) , snake_case__ )
            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            _lowerCAmelCase : List[Any] = 2
            for i in range(len(snake_case__ ) ):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride , divisor // 2 )
        _lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _lowerCAmelCase : Optional[int] = True
            check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            _lowerCAmelCase : Any = True
            check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )

    def a ( self ):
        '''Smoke-test the image-classification head check.'''
        _lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*snake_case__ )

    def a ( self ):
        '''Smoke-test the semantic-segmentation head check.'''
        _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*snake_case__ )

    @slow
    def a ( self ):
        '''Load the first pretrained checkpoint from the archive list.'''
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowerCAmelCase : Dict = MobileViTVaModel.from_pretrained(snake_case__ )
            self.assertIsNotNone(snake_case__ )
def lowercase ():
    """Load the COCO "two cats" fixture image used by the integration tests.

    NOTE(review): the original assigned the opened image to `_lowerCAmelCase`
    but returned the undefined name `image` (NameError); return it directly.
    """
    return Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
@require_torch
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
    """Slow integration tests for MobileViTV2 against pinned expected values.

    NOTE(review): machine-mangled — several lines read `snake_case__` where it
    is unbound (upstream these were `torch_device`), so the `.to(...)` calls
    and assertions below cannot run as-is. Code left unchanged (indentation
    restored); annotated only.
    """

    @cached_property
    def a ( self ):
        '''Image processor for the ImageNet-1k classification checkpoint (or None without vision).'''
        return (
            MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' )
            if is_vision_available()
            else None
        )

    @slow
    def a ( self ):
        '''Classification logits should match pinned values for the fixture image.'''
        _lowerCAmelCase : Union[str, Any] = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ).to(
            snake_case__ )
        _lowerCAmelCase : str = self.default_image_processor
        _lowerCAmelCase : Any = prepare_img()
        _lowerCAmelCase : Optional[int] = image_processor(images=snake_case__ , return_tensors='pt' ).to(snake_case__ )
        # forward pass
        with torch.no_grad():
            _lowerCAmelCase : Tuple = model(**snake_case__ )
        # verify the logits
        _lowerCAmelCase : Optional[Any] = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , snake_case__ )
        _lowerCAmelCase : Tuple = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] ).to(snake_case__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case__ , atol=1E-4 ) )

    @slow
    def a ( self ):
        '''Segmentation logits should match pinned values for the fixture image.'''
        _lowerCAmelCase : List[str] = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
        _lowerCAmelCase : Any = model.to(snake_case__ )
        _lowerCAmelCase : int = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
        _lowerCAmelCase : Optional[int] = prepare_img()
        _lowerCAmelCase : Dict = image_processor(images=snake_case__ , return_tensors='pt' ).to(snake_case__ )
        # forward pass
        with torch.no_grad():
            _lowerCAmelCase : int = model(**snake_case__ )
        _lowerCAmelCase : Dict = outputs.logits
        # verify the logits
        _lowerCAmelCase : str = torch.Size((1, 21, 32, 32) )
        self.assertEqual(logits.shape , snake_case__ )
        _lowerCAmelCase : Any = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ] , device=snake_case__ , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , snake_case__ , atol=1E-4 ) )

    @slow
    def a ( self ):
        '''Post-processed segmentation maps should match requested/default target sizes.'''
        _lowerCAmelCase : Any = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
        _lowerCAmelCase : List[Any] = model.to(snake_case__ )
        _lowerCAmelCase : str = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
        _lowerCAmelCase : Tuple = prepare_img()
        _lowerCAmelCase : List[str] = image_processor(images=snake_case__ , return_tensors='pt' ).to(snake_case__ )
        # forward pass
        with torch.no_grad():
            _lowerCAmelCase : Any = model(**snake_case__ )
        _lowerCAmelCase : Optional[Any] = outputs.logits.detach().cpu()
        _lowerCAmelCase : Any = image_processor.post_process_semantic_segmentation(outputs=snake_case__ , target_sizes=[(50, 60)] )
        _lowerCAmelCase : List[Any] = torch.Size((50, 60) )
        self.assertEqual(segmentation[0].shape , snake_case__ )
        _lowerCAmelCase : List[str] = image_processor.post_process_semantic_segmentation(outputs=snake_case__ )
        _lowerCAmelCase : Tuple = torch.Size((32, 32) )
        self.assertEqual(segmentation[0].shape , snake_case__ )
| 630 |
'''simple docstring'''
from collections import Counter
from timeit import timeit
def lowercase (_A = "" , ):
    """Return True iff the characters of `_A` (spaces removed, lower-cased) can
    be rearranged into a palindrome, i.e. at most one character count is odd.

    NOTE(review): the original body read the undefined name `input_str`; the
    parameter is `_A`.
    """
    return sum(c % 2 for c in Counter(_A.replace(' ' , '' ).lower() ).values() ) < 2
def lowercase (_A = "" ):
    """Return True iff `_A` (spaces removed, lower-cased) can be rearranged into
    a palindrome — at most one character may occur an odd number of times.

    NOTE(review): the original was machine-mangled — every assignment target
    was `_lowerCAmelCase`, so reads of `lower_case_input_str`,
    `character_freq_dict` and `odd_char` were NameErrors. Targets restored
    from the names the code reads.
    """
    if len(_A ) == 0:
        return True
    lower_case_input_str = _A.replace(' ' , '' ).lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict : dict[str, int] = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character , 0 ) + 1
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
        if odd_char > 1:
            return False
    return True
def lowercase (_A = "" ):
    """Print answers and timings for both palindrome-rearrangement checks.

    NOTE(review): indentation restored (it was stripped). The timeit snippets
    and direct calls reference the upstream function names
    `can_string_be_rearranged_as_palindrome_counter` /
    `can_string_be_rearranged_as_palindrome`, but in this mangled file both
    functions were renamed `lowercase`, so these names are unresolved until
    the originals are restored.
    """
    print('\nFor string = ' , _A , ':' )
    print(
        '> can_string_be_rearranged_as_palindrome_counter()' ,
        '\tans =' ,
        can_string_be_rearranged_as_palindrome_counter(_A ) ,
        '\ttime =' ,
        timeit(
            'z.can_string_be_rearranged_as_palindrome_counter(z.check_str)' ,
            setup='import __main__ as z' ,
        ) ,
        'seconds' ,
    )
    print(
        '> can_string_be_rearranged_as_palindrome()' ,
        '\tans =' ,
        can_string_be_rearranged_as_palindrome(_A ) ,
        '\ttime =' ,
        timeit(
            'z.can_string_be_rearranged_as_palindrome(z.check_str)' ,
            setup='import __main__ as z' ,
        ) ,
        'seconds' ,
    )
if __name__ == "__main__":
    # NOTE(review): targets restored — the mangled original bound the input and
    # status to `lowerCAmelCase` while reading `check_str`/`status` below.
    # `benchmark` and `can_string_be_rearranged_as_palindrome_counter` are the
    # upstream names; in this mangled file all functions are named `lowercase`.
    check_str = input(
        """Enter string to determine if it can be rearranged as a palindrome or not: """
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(F'''{check_str} can {"" if status else "not "}be rearranged as a palindrome''')
| 630 | 1 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): mangled bindings — the first assignment is the module logger
# (read later as `logger` by the config classes below) and the second is the
# pretrained-config archive map; both were renamed `lowerCAmelCase` by the
# obfuscation pass, so the later `logger.warning(...)` reads are unbound.
lowerCAmelCase : List[str] = logging.get_logger(__name__)
lowerCAmelCase : Dict = {
    """google/pix2struct-textcaps-base""": (
        """https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"""
    ),
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """Text-decoder configuration for Pix2Struct.

    NOTE(review): machine-mangled and non-runnable as-is — `__init__` declares
    the parameter name `snake_case__` eighteen times (a SyntaxError), every
    attribute is assigned to the local `_lowerCAmelCase` instead of
    `self.<name>`, the three `__magic_name__` class attributes shadow one
    another (upstream: `model_type`, `keys_to_ignore_at_inference`,
    `attribute_map`), and `logger` is unbound (see the mangled module
    constants above). Code left unchanged (indentation restored); annotated only.
    """
    __magic_name__ = "pix2struct_text_model"
    __magic_name__ = ["past_key_values"]
    __magic_name__ = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__( self , snake_case__=5_0244 , snake_case__=768 , snake_case__=64 , snake_case__=2048 , snake_case__=12 , snake_case__=12 , snake_case__=32 , snake_case__=128 , snake_case__=0.1 , snake_case__=1E-6 , snake_case__=1.0 , snake_case__="gelu_new" , snake_case__=0 , snake_case__=False , snake_case__=0 , snake_case__=1 , snake_case__=False , snake_case__=True , **snake_case__ , ):
        '''simple docstring'''
        _lowerCAmelCase : List[Any] = vocab_size
        _lowerCAmelCase : Optional[Any] = hidden_size
        _lowerCAmelCase : Optional[Any] = d_kv
        _lowerCAmelCase : Optional[Any] = d_ff
        _lowerCAmelCase : Optional[int] = num_layers
        _lowerCAmelCase : Tuple = num_heads
        _lowerCAmelCase : Optional[Any] = relative_attention_num_buckets
        _lowerCAmelCase : List[Any] = relative_attention_max_distance
        _lowerCAmelCase : Optional[int] = dropout_rate
        _lowerCAmelCase : Dict = layer_norm_epsilon
        _lowerCAmelCase : Union[str, Any] = initializer_factor
        _lowerCAmelCase : int = use_cache
        _lowerCAmelCase : Optional[int] = eos_token_id
        _lowerCAmelCase : Optional[int] = decoder_start_token_id
        # for backwards compatibility
        _lowerCAmelCase : Dict = dense_act_fn
        super().__init__(
            pad_token_id=snake_case__ , eos_token_id=snake_case__ , decoder_start_token_id=snake_case__ , tie_word_embeddings=snake_case__ , is_decoder=snake_case__ , **snake_case__ , )

    @classmethod
    def a ( cls , snake_case__ , **snake_case__ ):
        '''Load this config from a pretrained name/path, unwrapping a composite Pix2StructConfig.'''
        cls._set_token_in_kwargs(snake_case__ )
        _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = cls.get_config_dict(snake_case__ , **snake_case__ )
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get('model_type' ) == "pix2struct":
            _lowerCAmelCase : Optional[int] = config_dict['text_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(snake_case__ , **snake_case__ )
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """Vision-encoder configuration for Pix2Struct.

    NOTE(review): machine-mangled and non-runnable as-is — `__init__` declares
    `snake_case__` fifteen times (a SyntaxError), attributes are bound to the
    local `_lowerCAmelCase` instead of `self.<name>`, and `logger` is unbound.
    Code left unchanged (indentation restored); annotated only.
    """
    __magic_name__ = "pix2struct_vision_model"

    def __init__( self , snake_case__=768 , snake_case__=768 , snake_case__=2048 , snake_case__=64 , snake_case__=12 , snake_case__=12 , snake_case__="gelu_new" , snake_case__=1E-6 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=1E-10 , snake_case__=1.0 , snake_case__=4096 , snake_case__=32 , snake_case__=128 , **snake_case__ , ):
        '''simple docstring'''
        super().__init__(**snake_case__ )
        _lowerCAmelCase : List[Any] = hidden_size
        _lowerCAmelCase : str = patch_embed_hidden_size
        _lowerCAmelCase : Optional[int] = d_ff
        _lowerCAmelCase : str = dropout_rate
        _lowerCAmelCase : Union[str, Any] = num_hidden_layers
        _lowerCAmelCase : List[Any] = num_attention_heads
        _lowerCAmelCase : Union[str, Any] = initializer_range
        _lowerCAmelCase : Optional[int] = initializer_factor
        _lowerCAmelCase : Optional[Any] = attention_dropout
        _lowerCAmelCase : Dict = layer_norm_eps
        _lowerCAmelCase : Union[str, Any] = dense_act_fn
        _lowerCAmelCase : Union[str, Any] = seq_len
        _lowerCAmelCase : Dict = relative_attention_num_buckets
        _lowerCAmelCase : Tuple = relative_attention_max_distance
        _lowerCAmelCase : List[Any] = d_kv

    @classmethod
    def a ( cls , snake_case__ , **snake_case__ ):
        '''Load this config from a pretrained name/path, unwrapping a composite Pix2StructConfig.'''
        cls._set_token_in_kwargs(snake_case__ )
        _lowerCAmelCase , _lowerCAmelCase : List[Any] = cls.get_config_dict(snake_case__ , **snake_case__ )
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get('model_type' ) == "pix2struct":
            _lowerCAmelCase : Optional[int] = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(snake_case__ , **snake_case__ )
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """Composite Pix2Struct configuration combining text and vision sub-configs.

    NOTE(review): machine-mangled and non-runnable as-is — `__init__` declares
    `snake_case__` seven times (a SyntaxError), the two `__magic_name__`
    attributes shadow one another (upstream: `model_type`,
    `is_composition`), sub-configs are bound to `_lowerCAmelCase` instead of
    `self.text_config`/`self.vision_config` that the body then reads, and the
    classmethod ignores its mangled parameters and reads `text_config`/
    `vision_config` directly. Code left unchanged (indentation restored);
    annotated only.
    """
    __magic_name__ = "pix2struct"
    __magic_name__ = True

    def __init__( self , snake_case__=None , snake_case__=None , snake_case__=1.0 , snake_case__=0.02 , snake_case__=False , snake_case__=False , snake_case__=True , **snake_case__ , ):
        '''simple docstring'''
        super().__init__(tie_word_embeddings=snake_case__ , is_encoder_decoder=snake_case__ , **snake_case__ )
        if text_config is None:
            _lowerCAmelCase : List[Any] = {}
            logger.info('text_config is None. Initializing the Pix2StructTextConfig with default values.' )
        if vision_config is None:
            _lowerCAmelCase : Tuple = {}
            logger.info('vision_config is None. Initializing the Pix2StructVisionConfig with default values.' )
        _lowerCAmelCase : List[Any] = PixaStructTextConfig(**snake_case__ )
        _lowerCAmelCase : Optional[Any] = PixaStructVisionConfig(**snake_case__ )
        _lowerCAmelCase : Optional[Any] = self.text_config.decoder_start_token_id
        _lowerCAmelCase : int = self.text_config.pad_token_id
        _lowerCAmelCase : List[str] = self.text_config.eos_token_id
        _lowerCAmelCase : Union[str, Any] = initializer_factor
        _lowerCAmelCase : Optional[Any] = initializer_range
        _lowerCAmelCase : Optional[Any] = self.initializer_range
        _lowerCAmelCase : Tuple = self.initializer_range
        _lowerCAmelCase : Tuple = is_vqa

    @classmethod
    def a ( cls , snake_case__ , snake_case__ , **snake_case__ ):
        '''Build a composite config from text and vision config instances.'''
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **snake_case__ )

    def a ( self ):
        '''Serialize to a dict, expanding the nested sub-configs.'''
        _lowerCAmelCase : Optional[int] = copy.deepcopy(self.__dict__ )
        _lowerCAmelCase : List[Any] = self.text_config.to_dict()
        _lowerCAmelCase : Tuple = self.vision_config.to_dict()
        _lowerCAmelCase : str = self.__class__.model_type
        return output
| 630 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# NOTE(review): mangled bindings — the first assignment is the module logger
# and the second the pretrained-config archive map; both were renamed
# `lowerCAmelCase` by the obfuscation pass (upstream: `logger`,
# `DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP`).
lowerCAmelCase : str = logging.get_logger(__name__)
lowerCAmelCase : int = {
    """facebook/data2vec-text-base""": """https://huggingface.co/data2vec/resolve/main/config.json""",
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """Configuration for Data2VecText (RoBERTa-style encoder).

    Stores the vocabulary/architecture hyper-parameters; defaults mirror the
    `facebook/data2vec-text-base` checkpoint.

    NOTE(review): reconstructed — the mangled original declared the parameter
    name `snake_case__` eighteen times in `__init__` (a SyntaxError) and bound
    every attribute to the local `_lowerCAmelCase`. Parameter names were
    restored from the attribute names the body reads; defaults kept in the
    original positional order.
    """
    __magic_name__ = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """ONNX export configuration: declares the dynamic axes of the model inputs."""

    @property
    def a ( self ):
        """Return input-name -> {axis-index: axis-name} for ONNX export.

        NOTE(review): the original assigned the axis dict to a throwaway
        mangled name and then read the undefined `dynamic_axis`; the binding
        is restored here.
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
| 630 | 1 |
'''simple docstring'''
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def lowercase (_A ) -> dict:
    """Return {'started_at', 'completed_at', 'duration'} for one GitHub Actions job dict.

    NOTE(review): the original read the undefined names `job` and
    `SCREAMING_SNAKE_CASE_` and bound every result to `_lowerCAmelCase`; the
    parameter `_A` is the job payload, and the locals are restored from the
    keys written below. The return annotation was also wrong (`str`).
    """
    job_info = {}
    start = _A['started_at']
    end = _A['completed_at']
    start_datetime = date_parser.parse(start )
    end_datetime = date_parser.parse(end )
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0 )
    job_info['started_at'] = start
    job_info['completed_at'] = end
    job_info['duration'] = duration_in_min
    return job_info
def lowercase (workflow_run_id , token=None ) -> dict:
    """Fetch all jobs of a GitHub Actions workflow run, paging through the API,
    and return {job_name: job_info} (see the extractor above).

    NOTE(review): the original declared `_A` twice (a SyntaxError); parameters
    renamed to match the names the body reads. `extract_time_from_single_job`
    is the upstream helper name — in this mangled file it is defined as
    `lowercase` and must be re-pointed once names are restored.
    """
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f'Bearer {token}'}
    url = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'
    result = requests.get(url , headers=headers ).json()
    job_time = {}
    try:
        job_time.update({job['name']: extract_time_from_single_job(job ) for job in result['jobs']} )
        # First page already fetched; iterate the remaining pages (100 jobs each).
        pages_to_iterate_over = math.ceil((result['total_count'] - 1_0_0) / 1_0_0 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + f'&page={i + 2}' , headers=headers ).json()
            job_time.update({job['name']: extract_time_from_single_job(job ) for job in result['jobs']} )
        return job_time
    except Exception:
        print(f'Unknown error, could not fetch links:\n{traceback.format_exc()}' )
    return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
    args = parser.parse_args()
    # NOTE(review): `get_job_time` is the upstream name; in this mangled file
    # the function above is defined as `lowercase`.
    job_time = get_job_time(args.workflow_run_id)
    # Sort jobs from longest to shortest duration.
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
    for k, v in job_time.items():
        print(F'''{k}: {v["duration"]}''')
| 700 |
"""Shared pytest configuration for the test suite."""
import pytest

import datasets

# Import fixture modules as plugins.
# NOTE(review): pytest discovers plugins via the module-level name
# `pytest_plugins`; the mangled original bound this list to `lowerCAmelCase`,
# so the fixture modules were never loaded. Binding restored.
pytest_plugins = ["""tests.fixtures.files""", """tests.fixtures.hub""", """tests.fixtures.fsspec"""]
def lowercase (_A , items ):
    """Mark every collected test that lacks an `integration`/`unit` marker as `unit`.

    NOTE(review): the original declared `_A` twice (a SyntaxError); the second
    parameter is the collected test items, renamed `items` to match the body.
    The first argument (the pytest config) is unused.
    """
    for item in items:
        if any(marker in item.keywords for marker in ['integration', 'unit'] ):
            continue
        item.add_marker(pytest.mark.unit )
def lowercase (_A ):
    """Register the custom `torchaudio_latest` marker on the pytest config object.

    NOTE(review): the original body read the undefined name `config`; the
    parameter is `_A` (the pytest config), so use it directly.
    """
    _A.addinivalue_line('markers' , 'torchaudio_latest: mark test to run with torchaudio>=0.12' )
@pytest.fixture(autouse=True )
def lowercase (tmp_path_factory , monkeypatch ):
    """Autouse fixture: point every `datasets` cache directory at a temp tree.

    NOTE(review): the original declared `_A` twice (a SyntaxError) and passed
    the undefined `_A` to `autouse`; restored to the `tmp_path_factory` /
    `monkeypatch` fixtures the body reads, with `autouse=True`.
    """
    test_hf_cache_home = tmp_path_factory.getbasetemp() / 'cache'
    test_hf_datasets_cache = test_hf_cache_home / 'datasets'
    test_hf_metrics_cache = test_hf_cache_home / 'metrics'
    test_hf_modules_cache = test_hf_cache_home / 'modules'
    monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE' , str(test_hf_datasets_cache ) )
    monkeypatch.setattr('datasets.config.HF_METRICS_CACHE' , str(test_hf_metrics_cache ) )
    monkeypatch.setattr('datasets.config.HF_MODULES_CACHE' , str(test_hf_modules_cache ) )
    test_downloaded_datasets_path = test_hf_datasets_cache / 'downloads'
    monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH' , str(test_downloaded_datasets_path ) )
    test_extracted_datasets_path = test_hf_datasets_cache / 'downloads' / 'extracted'
    monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(test_extracted_datasets_path ) )
@pytest.fixture(autouse=True , scope='session' )
def lowercase ():
    """Session-wide autouse fixture: silence `datasets` progress bars in tests.

    NOTE(review): `autouse` was the undefined name `_A`; restored to True.
    """
    datasets.disable_progress_bar()
@pytest.fixture(autouse=True )
def lowercase (monkeypatch ):
    """Autouse fixture: keep tests from reporting download counts to the Hub.

    NOTE(review): the original took `_A` but the body needs the `monkeypatch`
    fixture, and the patched value should be False (reporting disabled), not
    the undefined `_A`; `autouse` restored to True.
    """
    monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS' , False )
@pytest.fixture
def lowercase (monkeypatch ):
    """Fixture: silence SQLAlchemy 2.0 deprecation ("uber") warnings.

    NOTE(review): the original took `_A` but patched via the undefined name
    `monkeypatch`; the silencing flag should be True, not the undefined `_A`.
    """
    monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING' , True )
| 630 | 0 |
import math
import sys
def lowercase (_A ):
    """Read the file at path `_A` and return its contents as a string of bits
    ('0'/'1', eight per byte, MSB first). Exits the process if unreadable.

    NOTE(review): the original read the undefined names `__A`, `data`,
    `curr_byte`; locals restored from the reads and indentation re-applied.
    """
    result = ''
    try:
        with open(_A , 'rb' ) as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f'{dat:08b}'
            result += curr_byte
        return result
    except OSError:
        print('File not accessible' )
        sys.exit()
def lowercase (_A ):
    """Decompress a bit string `_A` produced by the matching LZW compressor and
    return the decompressed bit string.

    NOTE(review): the original bound every value to `_lowerCAmelCase` and read
    the undefined `__A`; locals (`lexicon`, `result`, `curr_string`, `index`,
    `new_lex`) restored from the reads, and the dict writes that the mangling
    dropped (`new_lex['0' + curr_key]`, `lexicon[bin(index)[2:]]`) restored
    from the upstream algorithm.
    """
    lexicon = {'0': '0', '1': '1'}
    result, curr_string = '', ''
    index = len(lexicon )
    for i in range(len(_A ) ):
        curr_string += _A[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + '0'
        # When the index reaches a power of two, every code grows one bit:
        # re-key the lexicon with a leading '0'.
        if math.log2(index ).is_integer():
            new_lex = {}
            for curr_key in list(lexicon ):
                new_lex['0' + curr_key] = lexicon.pop(curr_key )
            lexicon = new_lex
        lexicon[bin(index )[2:]] = last_match_id + '1'
        index += 1
        curr_string = ''
    return result
def lowercase (file_path , to_write ):
    """Write the bit string `to_write` to `file_path` as bytes, adding the
    '1' + zero-padding terminator byte (which is not itself written — only the
    elements before the final padding chunk are emitted). Exits on I/O error.

    NOTE(review): the original declared `_A` twice (a SyntaxError) and read the
    undefined `to_write`/`__A`; parameters renamed to match the body and
    indentation restored.
    """
    byte_length = 8
    try:
        with open(file_path , 'wb' ) as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0 , len(to_write ) , byte_length )
            ]
            if len(result_byte_array[-1] ) % byte_length == 0:
                result_byte_array.append('10000000' )
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1] ) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem , 2 ).to_bytes(1 , byteorder='big' ) )
    except OSError:
        print('File not accessible' )
        sys.exit()
def lowercase (_A ):
    """Strip the leading zero-run and the '1' sentinel that follows it from the
    bit string `_A`, then drop the same count again from the front (mirrors the
    padding layout written by the compressor).

    NOTE(review): the original read the undefined `data_bits` and bound slices
    to `_lowerCAmelCase`; restored to operate on the parameter `_A`.
    """
    counter = 0
    for letter in _A:
        if letter == "1":
            break
        counter += 1
    _A = _A[counter:]
    _A = _A[counter + 1 :]
    return _A
def lowercase (source_path , destination_path ):
    """Decompress the LZW-compressed file at `source_path` into `destination_path`.

    NOTE(review): the original declared `_A` twice (a SyntaxError); parameters
    renamed to match their use. The helper names below are the upstream ones —
    in this mangled file every helper was renamed `lowercase`, so these
    references must be re-pointed once the helpers get their names back.
    """
    data_bits = read_file_binary(source_path )
    data_bits = remove_prefix(data_bits )
    decompressed = decompress_data(data_bits )
    write_file_binary(destination_path , decompressed )
if __name__ == "__main__":
    # NOTE(review): `compress` is undefined in this mangled file — every
    # function above was renamed `lowercase`. Upstream this calls the
    # decompression driver with (source path, destination path) from the CLI.
    compress(sys.argv[1], sys.argv[2])
| 701 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
# NOTE(review): mangled bindings — these `lowerCAmelCase` names were, upstream,
# the module `logger`, the docstring constants (`_CONFIG_FOR_DOC`,
# `_CHECKPOINT_FOR_DOC`, `_EXPECTED_OUTPUT_SHAPE`, the image-classification
# checkpoint/label), and the pretrained-archive list; the obfuscation pass
# collapsed them all onto one rebinding name.
lowerCAmelCase : str = logging.get_logger(__name__)
# General docstring
lowerCAmelCase : Optional[Any] = """RegNetConfig"""
# Base docstring
lowerCAmelCase : int = """facebook/regnet-y-040"""
lowerCAmelCase : Optional[Any] = [1, 10_88, 7, 7]
# Image classification docstring
lowerCAmelCase : Any = """facebook/regnet-y-040"""
lowerCAmelCase : Optional[Any] = """tabby, tabby cat"""
lowerCAmelCase : Tuple = [
    """facebook/regnet-y-040""",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class UpperCamelCase__ ( tf.keras.layers.Layer ):
    """Conv + BatchNorm + activation block for TF RegNet.

    NOTE(review): machine-mangled and non-runnable as-is — `__init__` declares
    `snake_case__` five times plus `**snake_case__` (a SyntaxError), the
    sub-layers are bound to the local `_lowerCAmelCase` instead of
    `self.padding`/`self.convolution`/`self.normalization`/`self.activation`
    that the `a` method reads, and `kernel_size`/`activation`/`hidden_state`
    are unbound. Code left unchanged (indentation restored); annotated only.
    """
    def __init__( self , snake_case__ , snake_case__ = 3 , snake_case__ = 1 , snake_case__ = 1 , snake_case__ = "relu" , **snake_case__ , ):
        '''simple docstring'''
        super().__init__(**snake_case__ )
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        _lowerCAmelCase : Optional[Any] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
        _lowerCAmelCase : List[Any] = tf.keras.layers.ConvaD(
            filters=snake_case__ , kernel_size=snake_case__ , strides=snake_case__ , padding='VALID' , groups=snake_case__ , use_bias=snake_case__ , name='convolution' , )
        _lowerCAmelCase : List[Any] = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' )
        _lowerCAmelCase : int = ACTaFN[activation] if activation is not None else tf.identity

    def a ( self , snake_case__ ):
        '''Apply pad -> conv -> batch-norm -> activation to the input tensor.'''
        _lowerCAmelCase : Any = self.convolution(self.padding(snake_case__ ) )
        _lowerCAmelCase : Union[str, Any] = self.normalization(snake_case__ )
        _lowerCAmelCase : int = self.activation(snake_case__ )
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    """RegNet stem: validates the channel count then applies the first 3x3 / stride-2 convolution."""

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        # Only check eagerly — at graph-trace time the static channel dim may be unknown.
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
    """Residual shortcut: 1x1 conv + batch norm to match the main branch's channels and stride."""

    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs, training=False):
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    """Squeeze-and-Excitation: global-pool, bottleneck to `reduced_channels`, sigmoid gate, rescale input."""

    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        # Plain list with names "attention.0"/"attention.2" — presumably to mirror the PyTorch
        # checkpoint's Sequential indices (ReLU at index 1 has no weights); confirm against conversion code.
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
    """RegNet "X" block: a ResNeXt-like bottleneck (1x1 -> grouped 3x3 -> 1x1) with a residual shortcut."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            # No activation on the last conv — it is applied after the residual addition below.
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACTaFN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
    """RegNet "Y" block: an X block with a Squeeze-and-Excitation layer inserted before the last 1x1 conv."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            # No activation on the last conv — it is applied after the residual addition below.
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACTaFN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    """A RegNet stage: `depth` stacked X or Y blocks; only the first block changes channels/stride."""

    def __init__(
        self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
    """Stacks the RegNet stages and optionally collects every intermediate hidden state."""

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(
                TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}")
            )

    def call(self, hidden_state, output_hidden_states=False, return_dict=True):
        # `hidden_states` gathers the input of every stage plus the final output (NHWC at this point).
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    """Full RegNet backbone: embedder -> encoder -> global average pooler, with NCHW in/out."""

    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(self, pixel_values, output_hidden_states=None, return_dict=None, training=False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values, training=training)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        # Change to NCHW output format to have uniformity with the other (PyTorch) modules.
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        # Serving signature: batched NCHW float32 images with the configured channel count.
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
REGNET_START_DOCSTRING = r"""
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""

# NOTE(review): fixed "ConveNextImageProcessor" -> "ConvNextImageProcessor" in the docstring below.
REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(self, pixel_values, output_hidden_states=None, return_dict=None, training=False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head; identity when num_labels == 0 (feature extraction only)
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None, training=False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
| 630 | 0 |
'''simple docstring'''
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('repo_id', ['canonical_dataset_name', 'org-name/dataset-name'])
@pytest.mark.parametrize('path', ['filename.csv', 'filename with blanks.csv'])
@pytest.mark.parametrize('revision', [None, 'v2'])
def test_hf_hub_url(repo_id, path, revision):
    """hf_hub_url builds a Hub resolve URL, URL-quoting the file path and defaulting revision to "main"."""
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f'https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(path)}'
| 702 |
'''simple docstring'''
from typing import Any
def lowercase(input_list):
    """Return the mode(s) of *input_list* in ascending order; an empty list for empty input.

    >>> lowercase([2, 2, 3])
    [2]
    >>> lowercase([1, 1, 2, 2])
    [1, 2]
    >>> lowercase([])
    []
    """
    from collections import Counter

    if not input_list:
        return []
    # Single O(n) frequency pass instead of calling list.count() per element (O(n^2)).
    counts = Counter(input_list)
    max_count = max(counts.values())
    return sorted(value for value, count in counts.items() if count == max_count)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 630 | 0 |
'''simple docstring'''
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

# Maps the --lr_scheduler CLI choice to the factory that builds the learning-rate schedule.
arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    "constant": get_constant_schedule,
    "constant_w_warmup": get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer(Trainer):
    """
    Trainer specialized for sequence-to-sequence models: custom optimizer/scheduler setup, optional sortish
    sampling, label smoothing, and generation during evaluation/prediction.
    """

    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        # FSMT keeps separate source/target vocabularies; every other config has a single vocab_size.
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss

    def create_optimizer_and_scheduler(self, num_training_steps: int):
        """Setup the optimizer and the learning rate scheduler, unless they were passed in already."""
        if self.optimizer is None:
            # No weight decay for biases and LayerNorm weights.
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                # fairscale's OSS shards optimizer state across data-parallel workers.
                self.optimizer = OSS(params=optimizer_grouped_parameters, optim=optimizer_cls, **optimizer_kwargs)
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")

    def _get_lr_scheduler(self, num_training_steps):
        """Build the LR schedule selected by --lr_scheduler (see `arg_to_scheduler`)."""
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler

    def _get_train_sampler(self):
        """Pick the training sampler: none for iterable datasets, TPU/sortish/distributed-aware otherwise."""
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                # Groups examples of similar length to reduce padding waste.
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )
            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )

    def _compute_loss(self, model, inputs, labels):
        """Return (loss, logits), honoring label smoothing / pad-token ignoring options."""
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss

    def prediction_step(self, model, inputs, prediction_loss_only, ignore_keys=None):
        """
        Perform an evaluation step, optionally generating tokens with `model.generate` when
        --predict_with_generate is set. Returns (loss, logits_or_generated_tokens, labels).
        """
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"], attention_mask=inputs["attention_mask"], **gen_kwargs
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)

    def _pad_tensors_to_max_len(self, tensor, max_length):
        """Right-pad `tensor` with the pad (or eos) token id up to `max_length` along the last dim."""
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
| 703 |
'''simple docstring'''
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 630 | 0 |
'''simple docstring'''
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    """Return the real (active) power, P = S * pf.

    Raises:
        ValueError: if power_factor is not a number in [-1, 1].
    """
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('power_factor must be a valid float value between -1 and 1.')
    return apparent_power * power_factor
def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Return the reactive power, Q = S * sqrt(1 - pf**2).

    Raises:
        ValueError: if power_factor is not a number in [-1, 1].
    """
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('power_factor must be a valid float value between -1 and 1.')
    return apparent_power * math.sqrt(1 - power_factor**2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 704 |
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "AI-Sweden/gpt-sw3-126m": 2048,
    "AI-Sweden/gpt-sw3-350m": 2048,
    "AI-Sweden/gpt-sw3-1.6b": 2048,
    "AI-Sweden/gpt-sw3-6.7b": 2048,
    "AI-Sweden/gpt-sw3-20b": 2048,
}
class GPTSw3Tokenizer(PreTrainedTokenizer):
    """
    SentencePiece-based tokenizer for the GPT-SW3 models: normalizes whitespace/non-printing characters,
    then delegates tokenization to a `spm.SentencePieceProcessor` model.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get('name_or_path')
        if name_or_path is None:
            logger.warning(
                'name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'
                ' you are testing the model, this can safely be ignored')
            name_or_path = 'None'

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = '<|endoftext|>' if eos_token is None else eos_token
        unk_token = '<unk>' if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            # The 7b checkpoint has no dedicated pad/bos tokens.
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = '<pad>' if pad_token is None else pad_token
            bos_token = '<s>' if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts
        # NOTE(review): these set members were originally distinct unicode space characters and may have
        # been mangled in transit — verify against the upstream file before relying on them.
        # fmt: off
        self.whitespaces = {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '', ''}
        # fmt: on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
        )

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and re-load in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def preprocess_text(self, text: str) -> str:
        """Strip non-printing characters, collapse unicode spaces to ' ', and NFC-normalize."""
        text = self.non_printing_characters_re.sub('', text)

        # Normalize whitespaces
        text = ''.join([char if char not in self.whitespaces else ' ' for char in text])

        # NFC Unicode normalization
        text = unicodedata.normalize('NFC', text)
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id (int) using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (int) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        """Returns the input string, this function is overridden to remove the default clean up."""
        return out_string

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens into a single string, decoding special tokens verbatim."""
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += ' '

                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False

        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or serialize) the SentencePiece model file into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def encode_fast(self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False):
        """Fast encode that skips the slow tokenizer machinery; optionally returns a torch tensor."""
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)

        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)

        return token_ids

    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        return self.sp_model.decode(token_ids)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Render a Conversation as alternating "User:"/"Bot:" turns and encode the resulting prompt."""
        all_responses = [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()]
        prompt = (
            f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(all_responses) + f'{self.bos_token}Bot:'
        )
        return self.encode(text=prompt)
| 630 | 0 |
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(_A ):
    """Parse a flat ['--key', 'value', ...] list into a {'key': 'value'} dict.

    The previous version ignored its parameter and referenced an undefined
    `unknown_args`; it is also renamed to `parse_unknown_args`, the name the
    CLI entry point actually calls.
    """
    return {key.lstrip('-' ): value for key, value in zip(_A[::2] , _A[1::2] )}
def main():
    """Entry point for the `datasets-cli` tool.

    Registers all subcommands, parses the command line, and runs the selected
    command. The previous version bound every local to the same mangled name
    (leaving `parser` undefined), failed to unpack `parse_known_args()`'s
    (args, unknown) tuple, and the module guard called an undefined `main`.
    """
    parser = ArgumentParser(
        'HuggingFace Datasets CLI tool' , usage='datasets-cli <command> [<args>]' , allow_abbrev=False )
    commands_parser = parser.add_subparsers(help='datasets-cli command helpers' )
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    TestCommand.register_subcommand(commands_parser )
    RunBeamCommand.register_subcommand(commands_parser )
    DummyDataCommand.register_subcommand(commands_parser )
    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args , 'func' ):
        parser.print_help()
        exit(1 )
    kwargs = parse_unknown_args(unknown_args )
    # Run
    service = args.func(args , **kwargs )
    service.run()


if __name__ == "__main__":
    main()
| 705 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """Test suite for `DDPMScheduler`: config sweeps, analytic variance values,
    full denoising loops, and custom-timestep validation.

    The previous version named every method `a` (so later defs shadowed
    earlier ones) and the class attribute `__magic_name__`, while bodies
    called `self.get_scheduler_config` / `self.scheduler_classes`; locals were
    all bound to one mangled name, leaving `config`, `scheduler`, `sample`
    etc. undefined. Names are restored to match the call sites.
    """

    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        """Default DDPM scheduler config, with keyword overrides applied on top."""
        config = {
            'num_train_timesteps': 1000,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'variance_type': 'fixed_small',
            'clip_sample': True,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        # Reference variances for the default linear beta schedule.
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1E-5

    def test_full_loop_no_noise(self):
        """Run the full reverse-diffusion loop and compare against reference sums."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.9606) < 1E-2
        assert abs(result_mean.item() - 0.3372) < 1E-3

    def test_full_loop_with_v_prediction(self):
        """Same as the epsilon loop, with the v-prediction parameterization."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='v_prediction')
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 202.0296) < 1E-2
        assert abs(result_mean.item() - 0.2631) < 1E-3

    def test_custom_timesteps(self):
        """previous_timestep must follow the custom schedule, ending at -1."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()
            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError, msg='`custom_timesteps` must be in descending order.'):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.'):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}', ):
            scheduler.set_timesteps(timesteps=timesteps)
| 630 | 0 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# Module-level constants. The previous version bound all three values to the
# same mangled name, leaving `logger`, `VOCAB_FILES_NAMES` and
# `PRETRAINED_VOCAB_FILES_MAP` (all referenced below) undefined.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""tokenizer_file""": """tokenizer.json"""}

PRETRAINED_VOCAB_FILES_MAP = {
    """tokenizer_file""": {
        """bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""",
        """bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""",
        """bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""",
        """bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""",
        """bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""",
        """bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""",
        """bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""",
    },
}
class UpperCamelCase__ ( __lowerCamelCase ):
    """Fast (tokenizers-backed) Bloom tokenizer.

    Restores the class-attribute and hook-method names the base fast-tokenizer
    machinery looks up: the previous version assigned all four class attrs to
    `__magic_name__` (each overwriting the last), declared `__init__` with
    nine parameters all named `snake_case__` (a SyntaxError), referenced the
    undefined `UpperCAmelCase_`, and renamed every override to `a` so the base
    class never invoked them.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<unk>" , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , add_prefix_space=False , clean_up_tokenization_spaces=False , **kwargs , ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , add_prefix_space=add_prefix_space , clean_up_tokenization_spaces=clean_up_tokenization_spaces , **kwargs , )
        # Rebuild the pre-tokenizer if the requested add_prefix_space differs
        # from what the serialized backend tokenizer was saved with.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('type' ) )
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus( self , *args , **kwargs ):
        """Reject pretokenized input unless add_prefix_space=True, then defer to base."""
        is_split_into_words = kwargs.get('is_split_into_words' , False )
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'
                ' pretokenized inputs.' )
        return super()._batch_encode_plus(*args , **kwargs )

    def _encode_plus( self , *args , **kwargs ):
        """Same pretokenized-input guard as `_batch_encode_plus`, for single inputs."""
        is_split_into_words = kwargs.get('is_split_into_words' , False )
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'
                ' pretokenized inputs.' )
        return super()._encode_plus(*args , **kwargs )

    def save_vocabulary( self , save_directory , filename_prefix = None ):
        """Save the backend tokenizer model files; returns the written paths."""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )

    def _build_conversation_input_ids( self , conversation ):
        """Concatenate each turn's ids followed by EOS, truncated to model_max_length."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 706 |
'''simple docstring'''
import socket
def main():
    """Connect to a file server on this host (port 12312) and save the stream.

    Sends a greeting, then writes everything received to 'Received_file'.
    The previous version was named `lowercase` while the module guard called
    `main()`, and wrote the undefined `_A` instead of the received chunk.
    """
    sock = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
    host = socket.gethostname()
    port = 1_2_3_1_2
    sock.connect((host, port) )
    sock.send(B'Hello server!' )
    with open('Received_file' , 'wb' ) as out_file:
        print('File opened' )
        print('Receiving data...' )
        while True:
            data = sock.recv(1_0_2_4 )
            if not data:
                break
            out_file.write(data )
    print('Successfully received the file' )
    sock.close()
    print('Connection closed' )


if __name__ == "__main__":
    main()
| 630 | 0 |
'''simple docstring'''
def selection_sort(collection):
    """Sort `collection` in place with selection sort and return it.

    The previous version was named `lowercase` while the module guard called
    `selection_sort`, passed the collection itself to `range()` (TypeError),
    and collapsed the swap into a single-element assignment.
    """
    length = len(collection )
    for i in range(length - 1 ):
        least = i
        # Find the index of the smallest remaining element.
        for k in range(i + 1 , length ):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection


if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
    print(selection_sort(unsorted))
| 707 |
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
# Conversion-mode switches: exactly one migration phase runs per invocation.
# The previous version bound every module variable to the same mangled name,
# leaving `do_only_*`, `args`, `subfolder`, `config`, `model`, `text`,
# `state_dict`, `new_state_dict` and `has_changed` (all used below) undefined.
do_only_config = False
do_only_weights = True
do_only_renaming = False

if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        """--repo_path""",
        default=None,
        type=str,
        required=True,
        help="""The config json file corresponding to the architecture.""",
    )

    parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")

    args = parser.parse_args()

    # Old config key -> new config key.
    config_parameters_to_change = {
        """image_size""": """sample_size""",
        """num_res_blocks""": """layers_per_block""",
        """block_channels""": """block_out_channels""",
        """down_blocks""": """down_block_types""",
        """up_blocks""": """up_block_types""",
        """downscale_freq_shift""": """freq_shift""",
        """resnet_num_groups""": """norm_num_groups""",
        """resnet_act_fn""": """act_fn""",
        """resnet_eps""": """norm_eps""",
        """num_head_channels""": """attention_head_dim""",
    }

    # Old state-dict key prefix -> new submodule name.
    key_parameters_to_change = {
        """time_steps""": """time_proj""",
        """mid""": """mid_block""",
        """downsample_blocks""": """down_blocks""",
        """upsample_blocks""": """up_blocks""",
    }

    subfolder = """""" if has_file(args.repo_path, """config.json""") else """unet"""

    with open(os.path.join(args.repo_path, subfolder, """config.json"""), """r""", encoding="""utf-8""") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, """config.json"""):
        model = UNetaDModel(**config)
    else:
        class_name = UNetaDConditionModel if """ldm-text2im-large-256""" in args.repo_path else UNetaDModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

    config["""down_block_types"""] = [k.replace("""UNetRes""", """""") for k in config["""down_block_types"""]]
    config["""up_block_types"""] = [k.replace("""UNetRes""", """""") for k in config["""up_block_types"""]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, """diffusion_pytorch_model.bin"""))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            # Skip legacy downsample op parameters entirely.
            if param_key.endswith(""".op.bias""") or param_key.endswith(""".op.weight"""):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                # Rename the leading module segment once, keeping the rest of the key.
                if not has_changed and param_key.split(""".""")[0] == key:
                    new_state_dict[""".""".join([new_key] + param_key.split(""".""")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 630 | 0 |
'''simple docstring'''
from math import pi
def arc_length(angle, radius) -> float:
    """Length of a circular arc subtending `angle` degrees on a circle of `radius`.

    The previous version declared two parameters both named `_A` (a
    SyntaxError) and was named `lowercase` while the module guard called
    `arc_length(90, 10)`; parameter order matches that call site.
    """
    return 2 * pi * radius * (angle / 3_6_0)


if __name__ == "__main__":
    print(arc_length(90, 10))
| 708 |
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class UpperCamelCase__ ( tf.keras.layers.Layer ):
    """In-graph GPT-2 BPE tokenizer as a Keras layer.

    Restores real method names: the previous version declared duplicate
    `snake_case__` parameters (a SyntaxError), named every method `a` while
    `cls.from_tokenizer` was called internally, and collapsed the
    (input_ids, attention_mask) unpack into a single mangled name.
    """

    def __init__( self , vocab , merges , max_length = None , pad_token_id = None ):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab , merges , sequence_length=max_length )

    @classmethod
    def from_tokenizer( cls , tokenizer , *args , **kwargs ):
        """Build from an existing (slow) GPT-2 tokenizer object."""
        merges = [' '.join(m ) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab , merges , *args , **kwargs )

    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , *init_inputs , **kwargs ):
        """Build from a pretrained checkpoint via the slow tokenizer."""
        tokenizer = GPTaTokenizer.from_pretrained(pretrained_model_name_or_path , *init_inputs , **kwargs )
        return cls.from_tokenizer(tokenizer , *init_inputs , **kwargs )

    @classmethod
    def from_config( cls , config ):
        """Keras deserialization hook: rebuild from `get_config()` output."""
        return cls(**config )

    def get_config( self ):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call( self , x , max_length = None ):
        """Tokenize `x`; pad ids and mask to `max_length` when a pad id is set."""
        input_ids = self.tf_tokenizer(x )
        attention_mask = tf.ones_like(input_ids )
        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length
            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids , max_seq_length=max_length , pad_value=self.pad_token_id )
        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 630 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)


class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """Deprecated alias for `MobileViTImageProcessor`, kept for backward compatibility.

    The previous version declared `*snake_case__, **snake_case__` (duplicate
    argument name, a SyntaxError) and passed the undefined `_lowerCAmelCase`
    as the warning category.
    """

    def __init__( self , *args , **kwargs ):
        # Deprecation shim: warn once, then defer entirely to the base processor.
        warnings.warn(
            'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use MobileViTImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 709 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-module import structure. The previous version bound the structure dict
# and the torch-only symbol list to throwaway names and never installed the
# `_LazyModule` into `sys.modules`, so `_import_structure` (used below) was
# undefined and lazy loading never took effect.
_import_structure = {
    """configuration_mctct""": ["""MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MCTCTConfig"""],
    """feature_extraction_mctct""": ["""MCTCTFeatureExtractor"""],
    """processing_mctct""": ["""MCTCTProcessor"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_mctct"""] = [
        """MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """MCTCTForCTC""",
        """MCTCTModel""",
        """MCTCTPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 630 | 0 |
'''simple docstring'''
import os
def solution(filename = "matrix.txt"):
    """Project Euler 81: minimal right/down path sum through a square grid.

    Reads a comma-separated grid from `filename` (relative to this module) and
    computes the minimal path sum from top-left to bottom-right via dynamic
    programming. The previous version was named `lowercase` while the guard
    called `solution()`, and every local was bound to the same mangled name,
    leaving `grid`, `dp` and the loop bounds undefined.
    """
    with open(os.path.join(os.path.dirname(__file__ ) , filename ) ) as in_file:
        data = in_file.read()
    grid = [[int(cell ) for cell in row.split(',' )] for row in data.strip().splitlines()]
    n = len(grid[0] )
    dp = [[0 for i in range(n )] for j in range(n )]
    dp[0][0] = grid[0][0]
    # First row / first column have only one way in.
    for i in range(1 , n ):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1 , n ):
        dp[i][0] = grid[i][0] + dp[i - 1][0]
    for i in range(1 , n ):
        for j in range(1 , n ):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] )
    return dp[-1][-1]


if __name__ == "__main__":
    print(F'''{solution() = }''')
| 710 |
'''simple docstring'''
lowerCAmelCase : Optional[int] = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)]
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : str = 0
while number:
# Increased Speed Slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 1_0_0_0_0_0]
number //= 1_0_0_0_0_0
return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
lowerCAmelCase : list[bool | None] = [None] * 10_00_00_00
lowerCAmelCase : List[str] = True
lowerCAmelCase : Union[str, Any] = False
def lowercase (_A ):
"""simple docstring"""
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
_lowerCAmelCase : Any = chain(next_number(_A ) )
_lowerCAmelCase : List[str] = number_chain
while number < 1_0_0_0_0_0_0_0:
_lowerCAmelCase : Tuple = number_chain
number *= 1_0
return number_chain
def lowercase (_A = 1_0_0_0_0_0_0_0 ):
"""simple docstring"""
for i in range(1 , _A ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(_A )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{solution() = }''')
| 630 | 0 |
'''simple docstring'''
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
# Emit INFO-level logs so conversion progress is visible.
logging.set_verbosity_info()
def tax_attention_lookup(params, i, prefix, layer_name="attention"):
    """Return the (k, o, q, v) projection kernels for layer `i` of `prefix`
    from a flattened T5X parameter dict.

    The previous version declared four parameters all named `_A` (a
    SyntaxError) and was named `lowercase` while call sites below use
    `tax_attention_lookup`.
    """
    k = params[f'{prefix}/layers_{i}/{layer_name}/key/kernel']
    o = params[f'{prefix}/layers_{i}/{layer_name}/out/kernel']
    q = params[f'{prefix}/layers_{i}/{layer_name}/query/kernel']
    v = params[f'{prefix}/layers_{i}/{layer_name}/value/kernel']
    return k, o, q, v
def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Return the (wi, wo) MLP kernels for layer `i` of `prefix`.

    When `split_mlp_wi` is True (v1.1 gated-GeLU checkpoints) `wi` is a
    (wi_0, wi_1) tuple; otherwise it is a single kernel. The previous version
    declared four parameters all named `_A` (a SyntaxError) and left
    `wi`/`wo` unbound.
    """
    if split_mlp_wi:
        wi_a = params[f'{prefix}/layers_{i}/mlp/wi_0/kernel']
        wi_b = params[f'{prefix}/layers_{i}/mlp/wi_1/kernel']
        wi = (wi_a, wi_b)
    else:
        wi = params[f'{prefix}/layers_{i}/mlp/wi/kernel']
    wo = params[f'{prefix}/layers_{i}/mlp/wo/kernel']
    return wi, wo
def tax_layer_norm_lookup(params, i, prefix, layer_name):
    """Return the layer-norm scale for layer `i` of `prefix`/`layer_name`.

    The previous version declared four parameters all named `_A` — a
    SyntaxError — while call sites below use `tax_layer_norm_lookup`.
    """
    return params[f'{prefix}/layers_{i}/{layer_name}/scale']
def convert_tax_to_pytorch(variables, *, num_layers, is_encoder_only):
    """Map a flattened T5X `variables['target']` tree to HF T5 state-dict keys.

    Args:
        variables: raw T5X checkpoint variables (with a 'target' subtree).
        num_layers: number of encoder (and decoder) blocks to convert.
        is_encoder_only: skip the decoder and LM head when True.

    Returns:
        An OrderedDict of numpy arrays keyed by HF T5 parameter names.

    The previous version bound every target entry to the same mangled local,
    so the destination state-dict keys were lost entirely; they are restored
    to the standard HF T5 naming (encoder.block.{i}.layer.{j}...).
    """
    old = traverse_util.flatten_dict(variables['target'] )
    old = {"/".join(k ): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print('Split MLP:' , split_mlp_wi )

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers ):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old , i , 'encoder' , 'pre_attention_layer_norm' )
        k, o, q, v = tax_attention_lookup(old , i , 'encoder' , 'attention' )
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old , i , 'encoder' , 'pre_mlp_layer_norm' )
        wi, wo = tax_mlp_lookup(old , i , 'encoder' , split_mlp_wi )
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers ):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old , i , 'decoder' , 'pre_self_attention_layer_norm' )
            k, o, q, v = tax_attention_lookup(old , i , 'decoder' , 'self_attention' )
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old , i , 'decoder' , 'pre_cross_attention_layer_norm' )
            k, o, q, v = tax_attention_lookup(old , i , 'decoder' , 'encoder_decoder_attention' )
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old , i , 'decoder' , 'pre_mlp_layer_norm' )
            wi, wo = tax_mlp_lookup(old , i , 'decoder' , split_mlp_wi )
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only):
    """Turn converted numpy params into torch tensors and fill in tied weights.

    Encoder/decoder embeddings and (for v1.0 checkpoints) the LM head default
    to the shared embedding matrix when absent. The previous version bound
    the dict to a mangled name while the body referenced `state_dict`, and
    was named `lowercase` while the caller uses `make_state_dict`.
    """
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print('Using shared word embeddings as lm_head.' )
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only):
    """Load a T5X checkpoint's weights into a PyTorch T5 model (in place).

    The previous version declared four parameters all named `_A` (a
    SyntaxError), bound every intermediate to one mangled local, and was
    named `lowercase` while the caller uses `load_tax_weights_in_ta`.
    """
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path )
    converted = convert_tax_to_pytorch(variables , num_layers=config.num_layers , is_encoder_only=is_encoder_only )
    state_dict = make_state_dict(converted , is_encoder_only )
    model.load_state_dict(state_dict , strict=True )
def convert_tax_checkpoint_to_pytorch(tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only = False):
    """Build a T5 model from `config_file`, load T5X weights, and save it.

    The previous version declared duplicate `_A` parameters (a SyntaxError)
    and left `config`/`model` unbound; it was named `lowercase` while the
    CLI entry point calls `convert_tax_checkpoint_to_pytorch`.
    """
    config = TaConfig.from_json_file(config_file )
    print(f'Building PyTorch model from configuration: {config}' )
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = TaEncoderModel(config )
    else:
        model = TaForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model , config , tax_checkpoint_path , is_encoder_only )
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    model.save_pretrained(pytorch_dump_path )
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path )
    print('Done' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
    # Required parameters
    parser.add_argument(
        """--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
    )
    parser.add_argument(
        """--config_file""",
        default=None,
        type=str,
        required=True,
        help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    parser.add_argument(
        """--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
    )
    args = parser.parse_args()
    # `--t5x_checkpoint_path` is exposed as `args.t5x_checkpoint_path`; the
    # previous code read the nonexistent `args.tax_checkpoint_path`.
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
| 711 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """Config checks for MobileViTV2: the config must expose `width_multiplier`."""

    def a ( self ):
        """Instantiate the config from `inputs_dict` and assert the attribute exists.

        The previous version passed the undefined `snake_case__` to `hasattr`
        and never bound the constructed config to a name.
        """
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , 'width_multiplier' ) )
class MobileViTVaModelTester:
    """Builds small MobileViTV2 configs and dummy inputs and runs shape checks.

    Renamed from the mangled `UpperCamelCase__`: the test class below
    instantiates `MobileViTVaModelTester(self)`. The original had every
    parameter named `snake_case__` (a SyntaxError) and bound attributes to a
    throwaway local instead of `self`; names were restored from the defaults
    and the `self.*` reads in the methods.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        # Width of the final feature map scales with the width multiplier.
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout

    def prepare_config_and_inputs(self):
        """Return a (config, pixel_values, labels, pixel_labels) dummy batch."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTVaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            # Fix: __init__ stores `ffn_dropout`/`attn_dropout`; the original
            # read non-existent `ffn_dropout_prob`/`attn_dropout_prob`.
            ffn_dropout=self.ffn_dropout,
            attn_dropout=self.attn_dropout,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # Backbone output: (batch, last_hidden_size, H/stride, W/stride).
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        # Check the logits shape both with and without labels.
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for MobileViTV2.

    Fixes from the mangled original: every method was named `a`, so each
    definition shadowed the previous one and unittest discovered no tests;
    class attributes were all bound to `__magic_name__`; several bodies
    referenced the undefined `snake_case__`. Bases restored to the imported
    ModelTesterMixin/PipelineTesterMixin.
    """

    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTVaModel,
            "image-classification": MobileViTVaForImageClassification,
            "image-segmentation": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    # NOTE(review): the four boolean flags were mangled to `__magic_name__`;
    # these names follow the upstream MobileViTV2 test suite — confirm.
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='MobileViTV2 does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='MobileViTV2 does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason='MobileViTV2 does not output attentions')
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.')
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)
            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Return the standard COCO test image used by the integration tests below.

    Renamed from the mangled `lowercase`: the integration tests call
    `prepare_img()`.
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class MobileViTVaModelIntegrationTest(unittest.TestCase):
    """Slow integration tests running the released MobileViTV2 checkpoints.

    Fixes from the mangled original: the cached property must be named
    `default_image_processor` (it is read as `self.default_image_processor`),
    the three `a` methods shadowed each other and were never discovered by
    unittest, and every local was bound to a throwaway name leaving the
    subsequent reads undefined.
    """

    @cached_property
    def default_image_processor(self):
        # Image processor matching the ImageNet-1k classification checkpoint.
        return (
            MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256')
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256').to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1E-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()
        # Post-process with and without an explicit target size.
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 630 | 0 |
'''simple docstring'''
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return *price* increased by *tax_rate* (e.g. 0.25 for a 25% tax).

    Fixes from the mangled original: the function was named `lowercase` with
    two parameters both called `_A` (a SyntaxError); the __main__ block calls
    `price_plus_tax(price, tax_rate)`.
    """
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"{price_plus_tax(1_00, 0.25) = }")
    print(f"{price_plus_tax(1_25.50, 0.05) = }")
| 712 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    """Integration test: xlm-roberta-base forward pass under Flax.

    Fixes from the mangled original: every local was bound to a throwaway
    name leaving `snake_case__` reads undefined, and the method was named
    `a`, so unittest never discovered it.
    """

    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base')
        tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-base')
        text = 'The dog is cute and lives in the garden house'
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        output = model(input_ids)['last_hidden_state']
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1E-3))
| 630 | 0 |
'''simple docstring'''
import math
import os
import sys
def read_file_binary(file_path: str) -> str:
    """Read *file_path* as bytes and return its contents as a string of '0'/'1' bits.

    Renamed from the mangled `lowercase`: `compress` below calls
    `read_file_binary`. Exits the process with a message if the file cannot
    be read (behavior kept from the original).
    """
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        # Each byte becomes its zero-padded 8-bit representation.
        return "".join(f"{dat:08b}" for dat in data)
    except OSError:
        print("File not accessible")
        sys.exit()
def add_key_to_lexicon(lexicon: dict, curr_string: str, index: int, last_match_id: str) -> None:
    """Replace *curr_string* in the LZ lexicon with its two one-bit extensions.

    Fixes from the mangled original: four parameters all named `_A`
    (a SyntaxError) and every reference collapsed to `__UpperCAmelCase`;
    names restored from the call site in `compress_data`.
    """
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id
    if math.log2(index).is_integer():
        # Code width grew by one bit: left-pad every existing code with '0'.
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]
    lexicon[curr_string + "1"] = bin(index)[2:]


def compress_data(data_bits: str) -> str:
    """Compress a string of '0'/'1' bits with the Lempel-Ziv scheme.

    Greedily matches the longest known phrase, emits its code, and extends
    the lexicon via `add_key_to_lexicon`.
    """
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""
    # Flush a trailing partial phrase by padding with zeros until it matches.
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"
    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id
    return result
def add_file_length(source_path: str, compressed: str) -> str:
    """Prepend a self-delimiting header encoding the source file's byte size.

    The header is (len-1) zero bits followed by the size in binary, so the
    decoder can recover the original length. Names restored from the call
    site in `compress`.
    """
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)
    return "0" * (length_length - 1) + file_length_binary + compressed
def write_file_binary(file_path: str, to_write: str) -> None:
    """Write bit-string *to_write* to *file_path* as bytes, with an end marker.

    The last byte is completed (or a whole new byte added) with '1' followed
    by zeros so the decoder can strip the padding.
    NOTE(review): assumes *to_write* is non-empty (guaranteed when called via
    `compress`, since `add_file_length` always prepends a header) — confirm.
    """
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
def compress(source_path: str, destination_path: str) -> None:
    """Compress *source_path* with Lempel-Ziv and write the result to *destination_path*.

    Renamed from the mangled `lowercase`: the __main__ block calls
    `compress(sys.argv[1], sys.argv[2])`; the helper names are the ones the
    original body already referenced.
    """
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
| 713 |
'''simple docstring'''
def pancake_sort(arr):
    """Sort *arr* in ascending order with pancake sort and return the result.

    Fixes from the mangled original: `cur` and `mi` were bound to a throwaway
    name and therefore undefined; the function was named `lowercase` while
    the __main__ block calls `pancake_sort`.
    """
    cur = len(arr)
    while cur > 1:
        # Find the maximum number in the unsorted prefix arr[:cur].
        mi = arr.index(max(arr[0:cur]))
        # Flip so the maximum comes to the front ...
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # ... then flip the whole prefix so it lands at position cur-1.
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
| 630 | 0 |
def binary_recursive(decimal: int) -> str:
    """Recursively convert a non-negative integer to its binary digit string.

    Name restored from the recursive call the original body already made.
    """
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """Validate *number* and return its Python-style binary string (e.g. '0b1010').

    Raises:
        ValueError: if *number* is empty or not an integer literal.
    """
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 714 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
# Module logger. NOTE(review): the variable name was mangled by an automated
# rewrite; upstream convention is `logger` — confirm before relying on it.
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
# Map of released GPT-J checkpoints to their config files.
# NOTE(review): presumably originally `GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP` — confirm.
lowerCAmelCase : str = {
    """EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    """Configuration for GPT-J models; defaults match EleutherAI/gpt-j-6B.

    Fixes from the mangled original: all __init__ parameters were named
    `snake_case__` (a SyntaxError) and the `self.*` assignments were bound to
    a throwaway local. Parameter names restored from the right-hand sides the
    body already referenced; base restored to the imported PretrainedConfig.
    """

    model_type = "gptj"
    # Map generic config attribute names onto GPT-J's historical field names.
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=5_0400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1E-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=5_0256,
        eos_token_id=5_0256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
class GPTJOnnxConfig(OnnxConfigWithPast):
    """ONNX export configuration for GPT-J (decoder-only, optional KV cache).

    Fixes from the mangled original: duplicate `snake_case__` parameters
    (a SyntaxError), every method named `a` (shadowing), and assignments
    bound to throwaway names. Method/property names restored from the
    `self.num_layers` / `self.num_attention_heads` reads the body already
    contained and the OnnxConfigWithPast contract.
    """

    def __init__(self, config, task="default", patching_specs=None, use_past=False):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self):
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self):
        return self._config.n_layer

    @property
    def num_attention_heads(self):
        return self._config.n_head

    def generate_dummy_inputs(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None):
        """Build dummy model inputs (with zeroed past_key_values when use_past)."""
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs

    @property
    def default_onnx_opset(self):
        return 13
| 630 | 0 |
'''simple docstring'''
# Digit sets by parity: adjacent pair digits in a reversible number must have
# opposite parity (their sum must be odd).
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list, length: int) -> int:
    """Count ways to fill the remaining digit positions of a reversible number.

    A number n is reversible when every digit of n + reverse(n) is odd.
    Digits are filled pairwise from the outside in; *remainder* carries the
    running carry of the already-fixed outer pairs.

    Fixes from the mangled original: four parameters all named `_A`
    (a SyntaxError) and the `digits[...]` assignment targets were lost; the
    index expressions follow the standard solution to this problem (verified
    by the known counts 20 below 100 and 120 below 1000).
    """
    if remaining_length == 0:
        # All positions chosen: reject leading/trailing zero, then verify every
        # pair sum (plus carries) is odd from the middle outward.
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 1_0
        return 1
    if remaining_length == 1:
        # Middle digit of an odd-length number: carry into it must be odd.
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(1_0):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 1_0, digits, length)
        return result
    result = 0
    for digit1 in range(1_0):
        digits[(length + remaining_length) // 2 - 1] = digit1
        # The matching digit must have opposite parity so the pair sum is odd.
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 1_0, digits, length
            )
    return result


def solution(max_power: int = 9) -> int:
    """Count reversible numbers below 10**max_power (Project Euler 145)."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
| 715 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import structure: submodules are only imported on first attribute
# access. Fixes from the mangled original: the dict was bound to a throwaway
# name while `_LazyModule(..., _import_structure, ...)` read the undefined
# `_import_structure`; the torch branch replaced the dict instead of adding
# the "modeling_x_clip" key; and the lazy proxy was never installed into
# `sys.modules`.
_import_structure = {
    "configuration_x_clip": [
        "XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XCLIPConfig",
        "XCLIPTextConfig",
        "XCLIPVisionConfig",
    ],
    "processing_x_clip": ["XCLIPProcessor"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is available: expose the modeling classes as well.
    _import_structure["modeling_x_clip"] = [
        "XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XCLIPModel",
        "XCLIPPreTrainedModel",
        "XCLIPTextModel",
        "XCLIPVisionModel",
    ]
if TYPE_CHECKING:
    from .configuration_x_clip import (
        XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XCLIPConfig,
        XCLIPTextConfig,
        XCLIPVisionConfig,
    )
    from .processing_x_clip import XCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_x_clip import (
            XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            XCLIPModel,
            XCLIPPreTrainedModel,
            XCLIPTextModel,
            XCLIPVisionModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 630 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
# Module logger. NOTE(review): the variable name was mangled by an automated
# rewrite; upstream convention is `logger` — confirm before relying on it.
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
# Map of released BlenderbotSmall checkpoints to their config files.
# NOTE(review): presumably originally `BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP` — confirm.
lowerCAmelCase : List[str] = {
    """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    """Configuration for BlenderbotSmall; defaults match facebook/blenderbot_small-90M.

    Fixes from the mangled original: all __init__ parameters were named
    `snake_case__` (a SyntaxError), the `self.*` targets were lost, and the
    super().__init__ call passed the undefined `_A`. Names restored from the
    right-hand sides and keyword names the body already contained; base
    restored to the imported PretrainedConfig.
    """

    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=5_0265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class UpperCamelCase__ ( a__ ):
"""simple docstring"""
@property
def a ( self ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase : Union[str, Any] = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
_lowerCAmelCase : str = {0: 'batch'}
_lowerCAmelCase : Optional[Any] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
_lowerCAmelCase : Optional[Any] = {0: 'batch', 1: 'decoder_sequence'}
_lowerCAmelCase : Optional[int] = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(_A , direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
_lowerCAmelCase : Optional[Any] = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
_lowerCAmelCase : Any = self.num_layers
for i in range(_A ):
_lowerCAmelCase : List[Any] = {0: 'batch', 2: 'past_sequence + sequence'}
_lowerCAmelCase : int = {0: 'batch', 2: 'past_sequence + sequence'}
else:
_lowerCAmelCase : List[Any] = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
def a ( self ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase : Union[str, Any] = super().outputs
else:
_lowerCAmelCase : Optional[int] = super(_A , self ).outputs
if self.use_past:
_lowerCAmelCase : Optional[Any] = self.num_layers
for i in range(_A ):
_lowerCAmelCase : Any = {0: 'batch', 2: 'past_sequence + sequence'}
_lowerCAmelCase : Tuple = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def a ( self , snake_case__ , snake_case__ = -1 , snake_case__ = -1 , snake_case__ = False , snake_case__ = None , ):
'''simple docstring'''
_lowerCAmelCase : List[str] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_A , _A , _A , _A , _A )
# Generate decoder inputs
_lowerCAmelCase : List[str] = seq_length if not self.use_past else 1
_lowerCAmelCase : List[str] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_A , _A , _A , _A , _A )
_lowerCAmelCase : str = {F'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
_lowerCAmelCase : Optional[int] = dict(**_A , **_A )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_lowerCAmelCase : str = common_inputs['input_ids'].shape
_lowerCAmelCase : Tuple = common_inputs['decoder_input_ids'].shape[1]
_lowerCAmelCase : int = self.num_attention_heads
_lowerCAmelCase : Any = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowerCAmelCase : List[Any] = decoder_seq_length + 3
_lowerCAmelCase : List[str] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_lowerCAmelCase : Optional[Any] = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(_A , _A )] , dim=1 )
_lowerCAmelCase : str = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_lowerCAmelCase : Optional[Any] = self.num_layers
_lowerCAmelCase : List[Any] = min(_A , _A )
_lowerCAmelCase : str = max(_A , _A ) - min_num_layers
_lowerCAmelCase : Any = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(_A ):
common_inputs["past_key_values"].append(
(
torch.zeros(_A ),
torch.zeros(_A ),
torch.zeros(_A ),
torch.zeros(_A ),
) )
# TODO: test this.
_lowerCAmelCase : Dict = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(_A , _A ):
common_inputs["past_key_values"].append((torch.zeros(_A ), torch.zeros(_A )) )
return common_inputs
def a ( self , snake_case__ , snake_case__ = -1 , snake_case__ = -1 , snake_case__ = False , snake_case__ = None , ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_A , _A , _A , _A , _A )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_lowerCAmelCase : List[Any] = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
_lowerCAmelCase : str = seqlen + 2
_lowerCAmelCase : Dict = self.num_layers
_lowerCAmelCase : Any = self.num_attention_heads
_lowerCAmelCase : Optional[int] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowerCAmelCase : Optional[int] = common_inputs['attention_mask'].dtype
_lowerCAmelCase : Dict = torch.cat(
[common_inputs['attention_mask'], torch.ones(_A , _A , dtype=_A )] , dim=1 )
_lowerCAmelCase : str = [
(torch.zeros(_A ), torch.zeros(_A )) for _ in range(_A )
]
return common_inputs
    def a ( self , snake_case__ , snake_case__ = -1 , snake_case__ = -1 , snake_case__ = False , snake_case__ = None , ):
        '''Generate dummy tokenizer inputs for sequence-classification/QA ONNX
        export: pick effective batch/sequence sizes via
        ``compute_effective_axis_dimension`` and tokenize a batch of
        ``unk_token`` strings.

        NOTE(review): machine-mangled — duplicate ``snake_case__`` parameters
        (SyntaxError) and unresolved ``_A`` placeholders; ``seq_length`` /
        ``batch_size`` / ``tokenizer`` are referenced but never bound.
        Code kept byte-for-byte.'''
        _lowerCAmelCase : str = compute_effective_axis_dimension(
            _A , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        _lowerCAmelCase : Optional[int] = tokenizer.num_special_tokens_to_add(_A )
        _lowerCAmelCase : str = compute_effective_axis_dimension(
            _A , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_A )
        # Generate dummy inputs according to compute batch and sequence
        _lowerCAmelCase : str = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
        _lowerCAmelCase : Tuple = dict(tokenizer(_A , return_tensors=_A ) )
        return common_inputs
    def a ( self , snake_case__ , snake_case__ = -1 , snake_case__ = -1 , snake_case__ = False , snake_case__ = None , ):
        '''Dispatch dummy-input generation by ``self.task``: default/seq2seq-lm,
        causal-lm, or the sequence-classification/QA fallback.

        NOTE(review): machine-mangled — duplicate ``snake_case__`` parameters
        (SyntaxError) and unresolved ``_A`` placeholders. Kept byte-for-byte.'''
        if self.task in ["default", "seq2seq-lm"]:
            _lowerCAmelCase : Dict = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                _A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A )
        elif self.task == "causal-lm":
            _lowerCAmelCase : Optional[Any] = self._generate_dummy_inputs_for_causal_lm(
                _A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A )
        else:
            _lowerCAmelCase : str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                _A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A )
        return common_inputs
    def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
        '''Flatten ``past_key_values`` for the ONNX graph, delegating to the
        parent implementation; seq2seq tasks use the plain ``super()`` path,
        others the explicit two-argument ``super(...)`` form.

        NOTE(review): machine-mangled — duplicate ``snake_case__`` parameters
        (SyntaxError) and unresolved ``_A`` placeholders. Kept byte-for-byte.'''
        if self.task in ["default", "seq2seq-lm"]:
            _lowerCAmelCase : Optional[Any] = super()._flatten_past_key_values_(_A , _A , _A , _A )
        else:
            _lowerCAmelCase : str = super(_A , self )._flatten_past_key_values_(
                _A , _A , _A , _A )
| 716 |
'''simple docstring'''
import math
from datetime import datetime, timedelta
def lowercase (year ):
    """Return the date of Easter Sunday (Gregorian calendar) for ``year``.

    Implements Gauss's Easter algorithm: derive the Paschal Full Moon offset
    from March 21 (``days_to_add``) and the following Sunday offset
    (``days_from_phm_to_sunday``), with the two classical exception dates.

    Args:
        year: Gregorian calendar year (int).

    Returns:
        datetime: midnight of Easter Sunday in ``year``.
    """
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100 )
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25 )
    # Gauss's q = floor(k / 4). This MUST be integer division: with float
    # division the fractional remainder propagates through the % 30 / % 7
    # arithmetic below and produces wrong dates for centuries not divisible
    # by 4 (e.g. 1994 came out as March 28 instead of April 3).
    leap_day_reinstall_number = leap_day_inhibits // 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    # Two historical exceptions to the plain formula (Gauss's corrections).
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 19 )
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 18 )
    else:
        return datetime(year , 3 , 22 ) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
    for year in (19_94, 20_00, 20_10, 20_21, 20_23):
        tense = """will be""" if year > datetime.now().year else """was"""
        print(F'''Easter in {year} {tense} {lowercase(year)}''')
| 630 | 0 |
'''simple docstring'''
def lowercase (num ):
    """Return all primes <= ``num`` using the sieve of Eratosthenes.

    Args:
        num: upper bound (inclusive) of the search range.

    Returns:
        list[int]: primes in ascending order (empty for num == 1).

    Raises:
        ValueError: if ``num`` is not positive.
    """
    if num <= 0:
        raise ValueError('Input must be a positive integer' )
    # sieve[i] stays True while i is still presumed prime
    sieve = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if sieve[p]:
            # strike out every multiple of p, starting at p*p (smaller
            # multiples were already removed by smaller primes)
            for multiple in range(p * p , num + 1 , p ):
                sieve[multiple] = False
        p += 1
    return [prime for prime in range(2 , num + 1 ) if sieve[prime]]
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    user_num = int(input("""Enter a positive integer: """).strip())
    print(lowercase(user_num))
| 717 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = [10, 20, 30, 40, 50, 60]
_lowerCAmelCase : Union[str, Any] = [2, 4, 6, 8, 10, 12]
_lowerCAmelCase : Dict = 100
self.assertEqual(kp.calc_profit(snake_case__ , snake_case__ , snake_case__ ) , 210 )
def a ( self ):
'''simple docstring'''
self.assertRaisesRegex(snake_case__ , 'max_weight must greater than zero.' )
def a ( self ):
'''simple docstring'''
self.assertRaisesRegex(snake_case__ , 'Weight can not be negative.' )
def a ( self ):
'''simple docstring'''
self.assertRaisesRegex(snake_case__ , 'Profit can not be negative.' )
def a ( self ):
'''simple docstring'''
self.assertRaisesRegex(snake_case__ , 'max_weight must greater than zero.' )
def a ( self ):
'''simple docstring'''
self.assertRaisesRegex(
snake_case__ , 'The length of profit and weight must be same.' )
if __name__ == "__main__":
unittest.main()
| 630 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase__ :
    """Model tester for Nezha: builds small random configs/inputs and runs
    per-task shape checks (NezhaModel and the task heads visible below).

    NOTE(review): this file was machine-mangled — methods are all named ``a``,
    parameters are duplicated ``snake_case__`` names (a SyntaxError), and the
    bodies reference the original, now-undefined names (``parent``,
    ``batch_size``, ``lowerCamelCase_`` ...). Code kept byte-for-byte; it
    cannot run until the upstream identifiers are restored.
    """
    def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=99 , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=37 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=128 , snake_case__=32 , snake_case__=16 , snake_case__=2 , snake_case__=0.02 , snake_case__=3 , snake_case__=4 , snake_case__=None , ):
        '''Store the tester hyperparameters (batch/sequence sizes, model dims,
        label counts). Right-hand sides reference the intended parameter
        names, which the mangled signature no longer defines.'''
        _lowerCAmelCase : List[str] = parent
        _lowerCAmelCase : Any = batch_size
        _lowerCAmelCase : Dict = seq_length
        _lowerCAmelCase : Any = is_training
        _lowerCAmelCase : Tuple = use_input_mask
        _lowerCAmelCase : int = use_token_type_ids
        _lowerCAmelCase : str = use_labels
        _lowerCAmelCase : List[Any] = vocab_size
        _lowerCAmelCase : Optional[Any] = hidden_size
        _lowerCAmelCase : List[str] = num_hidden_layers
        _lowerCAmelCase : Union[str, Any] = num_attention_heads
        _lowerCAmelCase : Any = intermediate_size
        _lowerCAmelCase : Any = hidden_act
        _lowerCAmelCase : Union[str, Any] = hidden_dropout_prob
        _lowerCAmelCase : Tuple = attention_probs_dropout_prob
        _lowerCAmelCase : Dict = max_position_embeddings
        _lowerCAmelCase : Optional[int] = type_vocab_size
        _lowerCAmelCase : Optional[Any] = type_sequence_label_size
        _lowerCAmelCase : Tuple = initializer_range
        _lowerCAmelCase : Any = num_labels
        _lowerCAmelCase : str = num_choices
        _lowerCAmelCase : Optional[Any] = scope
    def a ( self ):
        '''Build random ids/masks/labels and a config; returns (config,
        input_ids, token_type_ids, input_mask, sequence_labels, token_labels,
        choice_labels).'''
        _lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _lowerCAmelCase : Dict = None
        if self.use_input_mask:
            _lowerCAmelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
        _lowerCAmelCase : Dict = None
        if self.use_token_type_ids:
            _lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        _lowerCAmelCase : Tuple = None
        _lowerCAmelCase : Dict = None
        _lowerCAmelCase : List[str] = None
        if self.use_labels:
            _lowerCAmelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            _lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            _lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
        _lowerCAmelCase : Optional[int] = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def a ( self ):
        '''Return a NezhaConfig built from the tester's hyperparameters.'''
        return NezhaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , )
    def a ( self ):
        '''Decoder variant of prepare_config_and_inputs: additionally returns
        random encoder_hidden_states and encoder_attention_mask.'''
        (
            _lowerCAmelCase
        ) : Optional[Any] = self.prepare_config_and_inputs()
        _lowerCAmelCase : Optional[int] = True
        _lowerCAmelCase : int = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        _lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
        '''Run NezhaModel forward (with/without masks) and check the
        last_hidden_state and pooler_output shapes.'''
        _lowerCAmelCase : List[str] = NezhaModel(config=lowerCamelCase_ )
        model.to(lowerCamelCase_ )
        model.eval()
        _lowerCAmelCase : Any = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
        _lowerCAmelCase : Any = model(lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
        _lowerCAmelCase : Optional[Any] = model(lowerCamelCase_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
        '''Run NezhaModel as a decoder (with encoder states/mask) and check
        output shapes.'''
        _lowerCAmelCase : int = True
        _lowerCAmelCase : str = NezhaModel(lowerCamelCase_ )
        model.to(lowerCamelCase_ )
        model.eval()
        _lowerCAmelCase : Optional[Any] = model(
            lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , encoder_attention_mask=lowerCamelCase_ , )
        _lowerCAmelCase : int = model(
            lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , )
        _lowerCAmelCase : str = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
        '''Check NezhaForMaskedLM logits shape (batch, seq_len, vocab).'''
        _lowerCAmelCase : Optional[int] = NezhaForMaskedLM(config=lowerCamelCase_ )
        model.to(lowerCamelCase_ )
        model.eval()
        _lowerCAmelCase : str = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
        '''Check NezhaForNextSentencePrediction logits shape (batch, 2).'''
        _lowerCAmelCase : Optional[Any] = NezhaForNextSentencePrediction(config=lowerCamelCase_ )
        model.to(lowerCamelCase_ )
        model.eval()
        _lowerCAmelCase : str = model(
            lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
    def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
        '''Check NezhaForPreTraining prediction and seq-relationship logits.'''
        _lowerCAmelCase : Union[str, Any] = NezhaForPreTraining(config=lowerCamelCase_ )
        model.to(lowerCamelCase_ )
        model.eval()
        _lowerCAmelCase : Dict = model(
            lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , next_sentence_label=lowerCamelCase_ , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
    def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
        '''Check NezhaForQuestionAnswering start/end logits shapes.'''
        _lowerCAmelCase : List[str] = NezhaForQuestionAnswering(config=lowerCamelCase_ )
        model.to(lowerCamelCase_ )
        model.eval()
        _lowerCAmelCase : Tuple = model(
            lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
        '''Check NezhaForSequenceClassification logits shape (batch, labels).'''
        _lowerCAmelCase : Any = self.num_labels
        _lowerCAmelCase : Union[str, Any] = NezhaForSequenceClassification(lowerCamelCase_ )
        model.to(lowerCamelCase_ )
        model.eval()
        _lowerCAmelCase : Any = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
        '''Check NezhaForTokenClassification logits shape (batch, seq, labels).'''
        _lowerCAmelCase : List[Any] = self.num_labels
        _lowerCAmelCase : Any = NezhaForTokenClassification(config=lowerCamelCase_ )
        model.to(lowerCamelCase_ )
        model.eval()
        _lowerCAmelCase : Tuple = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
        '''Check NezhaForMultipleChoice logits shape (batch, num_choices),
        expanding each input along a choices dimension first.'''
        _lowerCAmelCase : List[Any] = self.num_choices
        _lowerCAmelCase : List[str] = NezhaForMultipleChoice(config=lowerCamelCase_ )
        model.to(lowerCamelCase_ )
        model.eval()
        _lowerCAmelCase : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        _lowerCAmelCase : Optional[int] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        _lowerCAmelCase : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        _lowerCAmelCase : str = model(
            lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def a ( self ):
        '''Return (config, inputs_dict) for the common test mixin.'''
        _lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
        (
            _lowerCAmelCase
        ) : List[str] = config_and_inputs
        _lowerCAmelCase : int = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class UpperCamelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
    """Common-mixin test suite for Nezha models (uses the tester class above).

    NOTE(review): machine-mangled — the mixin base names were replaced by the
    undefined ``_UpperCAmelCase`` (originally the ModelTester/Generation/
    Pipeline mixins), class attributes are all bound to the recycled name
    ``__magic_name__``, methods are all named ``a`` with duplicated
    ``snake_case__`` parameters (a SyntaxError), and call arguments reference
    the undefined ``lowerCamelCase_``. Code kept byte-for-byte.
    """
    __magic_name__ = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    __magic_name__ = (
        {
            "feature-extraction": NezhaModel,
            "fill-mask": NezhaForMaskedLM,
            "question-answering": NezhaForQuestionAnswering,
            "text-classification": NezhaForSequenceClassification,
            "token-classification": NezhaForTokenClassification,
            "zero-shot": NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    __magic_name__ = True
    def a ( self , snake_case__ , snake_case__ , snake_case__=False ):
        '''Extend the mixin's _prepare_for_class: for pretraining-mapped model
        classes, add zero-filled ``labels`` and ``next_sentence_label``.'''
        _lowerCAmelCase : List[Any] = super()._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ , return_labels=lowerCamelCase_ )
        if return_labels:
            if model_class in get_values(lowerCamelCase_ ):
                _lowerCAmelCase : int = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=lowerCamelCase_ )
                _lowerCAmelCase : Optional[int] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
        return inputs_dict
    def a ( self ):
        '''setUp: create the model tester and a ConfigTester for NezhaConfig.'''
        _lowerCAmelCase : str = NezhaModelTester(self )
        _lowerCAmelCase : Any = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 )
    def a ( self ):
        '''Run the shared config sanity checks.'''
        self.config_tester.run_common_tests()
    def a ( self ):
        '''Test the base model forward shapes.'''
        _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCamelCase_ )
    def a ( self ):
        '''Test the model in decoder mode.'''
        _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*lowerCamelCase_ )
    def a ( self ):
        '''Test decoder mode with the encoder attention mask set to None
        (default full attention over encoder states).'''
        (
            _lowerCAmelCase
        ) : List[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
        _lowerCAmelCase : List[str] = None
        self.model_tester.create_and_check_model_as_decoder(
            lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , )
    def a ( self ):
        '''Test the masked-LM head.'''
        _lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase_ )
    def a ( self ):
        '''Test the multiple-choice head.'''
        _lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*lowerCamelCase_ )
    def a ( self ):
        '''Test the next-sentence-prediction head.'''
        _lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*lowerCamelCase_ )
    def a ( self ):
        '''Test the pretraining heads.'''
        _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*lowerCamelCase_ )
    def a ( self ):
        '''Test the question-answering head.'''
        _lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ )
    def a ( self ):
        '''Test the sequence-classification head.'''
        _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase_ )
    def a ( self ):
        '''Test the token-classification head.'''
        _lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ )
    @slow
    def a ( self ):
        '''Smoke-test from_pretrained on the first published checkpoint.'''
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowerCAmelCase : Optional[int] = NezhaModel.from_pretrained(lowerCamelCase_ )
            self.assertIsNotNone(lowerCamelCase_ )
    @slow
    @require_torch_gpu
    def a ( self ):
        '''Trace each model with torch.jit, save/reload it, and run the
        loaded module (skipping NezhaForMultipleChoice, which misbehaves
        under JIT per the inline comment).'''
        _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return
            _lowerCAmelCase : Union[str, Any] = True
            _lowerCAmelCase : Optional[Any] = model_class(config=lowerCamelCase_ )
            _lowerCAmelCase : Optional[int] = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
            _lowerCAmelCase : Optional[Any] = torch.jit.trace(
                lowerCamelCase_ , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(lowerCamelCase_ , os.path.join(lowerCamelCase_ , 'bert.pt' ) )
                _lowerCAmelCase : Optional[Any] = torch.jit.load(os.path.join(lowerCamelCase_ , 'bert.pt' ) , map_location=lowerCamelCase_ )
                loaded(inputs_dict['input_ids'].to(lowerCamelCase_ ) , inputs_dict['attention_mask'].to(lowerCamelCase_ ) )
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
    """Slow integration tests against the ``sijunhe/nezha-cn-base`` checkpoint,
    comparing a 3x3 logits slice to hard-coded reference values.

    NOTE(review): machine-mangled like the classes above (methods named ``a``,
    ``lowerCamelCase_`` unresolved). Code kept byte-for-byte.
    """
    @slow
    def a ( self ):
        '''Base-model forward: check output shape (1, 6, 768) and a slice.'''
        _lowerCAmelCase : Dict = NezhaModel.from_pretrained('sijunhe/nezha-cn-base' )
        _lowerCAmelCase : List[str] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        _lowerCAmelCase : Optional[int] = torch.tensor([[0, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            _lowerCAmelCase : Any = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ )[0]
        _lowerCAmelCase : Tuple = torch.Size((1, 6, 768) )
        self.assertEqual(output.shape , lowerCamelCase_ )
        _lowerCAmelCase : Union[str, Any] = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCamelCase_ , atol=1E-4 ) )
    @slow
    def a ( self ):
        '''Masked-LM forward: check output shape (1, 6, 21128) and a slice.'''
        _lowerCAmelCase : Optional[int] = NezhaForMaskedLM.from_pretrained('sijunhe/nezha-cn-base' )
        _lowerCAmelCase : Union[str, Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        _lowerCAmelCase : Dict = torch.tensor([[1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            _lowerCAmelCase : int = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ )[0]
        _lowerCAmelCase : List[str] = torch.Size((1, 6, 2_1128) )
        self.assertEqual(output.shape , lowerCamelCase_ )
        _lowerCAmelCase : Any = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCamelCase_ , atol=1E-4 ) )
| 718 |
'''simple docstring'''
def lowercase (_A , _A ):
"""simple docstring"""
_lowerCAmelCase : Optional[int] = (boundary[1] - boundary[0]) / steps
_lowerCAmelCase : Any = boundary[0]
_lowerCAmelCase : List[str] = boundary[1]
_lowerCAmelCase : Tuple = make_points(_A , _A , _A )
_lowerCAmelCase : Tuple = 0.0
y += (h / 2.0) * f(_A )
for i in x_i:
# print(i)
y += h * f(_A )
y += (h / 2.0) * f(_A )
return y
def lowercase (_A , _A , _A ):
"""simple docstring"""
_lowerCAmelCase : Tuple = a + h
while x < (b - h):
yield x
_lowerCAmelCase : Any = x + h
def lowercase (_A ): # enter your function here
"""simple docstring"""
_lowerCAmelCase : int = (x - 0) * (x - 0)
return y
def lowercase ():
"""simple docstring"""
_lowerCAmelCase : Optional[Any] = 0.0 # Lower bound of integration
_lowerCAmelCase : Dict = 1.0 # Upper bound of integration
_lowerCAmelCase : Optional[Any] = 10.0 # define number of steps or resolution
_lowerCAmelCase : Optional[int] = [a, b] # define boundary of integration
_lowerCAmelCase : List[Any] = method_a(_A , _A )
print(f'y = {y}' )
if __name__ == "__main__":
main()
| 630 | 0 |
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase : str = logging.get_logger(__name__)
# Keras reference constructor for each EfficientNet variant.
# NOTE(review): restored — the original had every value digit-mangled to
# ``EfficientNetBa`` and the dict bound to a recycled throwaway name, while
# the conversion routine below looks up ``model_classes[model_name]``.
model_classes = {
    '''b0''': efficientnet.EfficientNetB0,
    '''b1''': efficientnet.EfficientNetB1,
    '''b2''': efficientnet.EfficientNetB2,
    '''b3''': efficientnet.EfficientNetB3,
    '''b4''': efficientnet.EfficientNetB4,
    '''b5''': efficientnet.EfficientNetB5,
    '''b6''': efficientnet.EfficientNetB6,
    '''b7''': efficientnet.EfficientNetB7,
}
# Per-variant EfficientNet hyperparameters: classifier hidden dim, width/depth
# scaling coefficients, input resolution, dropout rate, and the block indices
# that need explicit depthwise padding.
# NOTE(review): restored — the dict was bound to a recycled throwaway name,
# while the functions below read ``CONFIG_MAP[model_name]``.
CONFIG_MAP = {
    '''b0''': {
        '''hidden_dim''': 1280,
        '''width_coef''': 1.0,
        '''depth_coef''': 1.0,
        '''image_size''': 224,
        '''dropout_rate''': 0.2,
        '''dw_padding''': [],
    },
    '''b1''': {
        '''hidden_dim''': 1280,
        '''width_coef''': 1.0,
        '''depth_coef''': 1.1,
        '''image_size''': 240,
        '''dropout_rate''': 0.2,
        '''dw_padding''': [16],
    },
    '''b2''': {
        '''hidden_dim''': 1408,
        '''width_coef''': 1.1,
        '''depth_coef''': 1.2,
        '''image_size''': 260,
        '''dropout_rate''': 0.3,
        '''dw_padding''': [5, 8, 16],
    },
    '''b3''': {
        '''hidden_dim''': 1536,
        '''width_coef''': 1.2,
        '''depth_coef''': 1.4,
        '''image_size''': 300,
        '''dropout_rate''': 0.3,
        '''dw_padding''': [5, 18],
    },
    '''b4''': {
        '''hidden_dim''': 1792,
        '''width_coef''': 1.4,
        '''depth_coef''': 1.8,
        '''image_size''': 380,
        '''dropout_rate''': 0.4,
        '''dw_padding''': [6],
    },
    '''b5''': {
        '''hidden_dim''': 2048,
        '''width_coef''': 1.6,
        '''depth_coef''': 2.2,
        '''image_size''': 456,
        '''dropout_rate''': 0.4,
        '''dw_padding''': [13, 27],
    },
    '''b6''': {
        '''hidden_dim''': 2304,
        '''width_coef''': 1.8,
        '''depth_coef''': 2.6,
        '''image_size''': 528,
        '''dropout_rate''': 0.5,
        '''dw_padding''': [31],
    },
    '''b7''': {
        '''hidden_dim''': 2560,
        '''width_coef''': 2.0,
        '''depth_coef''': 3.1,
        '''image_size''': 600,
        '''dropout_rate''': 0.5,
        '''dw_padding''': [18],
    },
}
def lowercase (model_name ):
    """Build an ``EfficientNetConfig`` for the given variant (e.g. ``"b0"``).

    Copies the per-variant hyperparameters from ``CONFIG_MAP`` onto the
    config and attaches the ImageNet-1k id/label mappings downloaded from
    the ``huggingface/label-files`` dataset repo.

    NOTE(review): the destination attribute names (``width_coefficient``
    etc.) were restored from the upstream conversion script — confirm
    against the current ``EfficientNetConfig`` definition.
    """
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]['hidden_dim']
    config.width_coefficient = CONFIG_MAP[model_name]['width_coef']
    config.depth_coefficient = CONFIG_MAP[model_name]['depth_coef']
    config.image_size = CONFIG_MAP[model_name]['image_size']
    config.dropout_rate = CONFIG_MAP[model_name]['dropout_rate']
    config.depthwise_padding = CONFIG_MAP[model_name]['dw_padding']
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    config.num_labels = 1000
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    return config
def lowercase ():
    """Download and return the standard COCO sanity-check image (two cats).

    Used to verify that the converted model produces the same logits as the
    original TF model. Requires network access.
    """
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    # stream=True so PIL can read straight from the response's raw stream
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
def lowercase (model_name ):
    """Build an ``EfficientNetImageProcessor`` for the given variant.

    The square resize size comes from ``CONFIG_MAP``; the mean/std are the
    values used by the original TF preprocessing. ``do_center_crop=False``
    matches the upstream conversion script (plain resize, no crop).
    """
    size = CONFIG_MAP[model_name]['image_size']
    preprocessor = EfficientNetImageProcessor(
        size={'height': size, 'width': size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47_853_944, 0.4_732_864, 0.47_434_163] , do_center_crop=False , )
    return preprocessor
def lowercase (original_param_names ):
    """Map original TF EfficientNet weight names to HF PyTorch state-dict keys.

    Args:
        original_param_names: iterable of TF variable names (e.g.
            ``"block1a_expand_conv/kernel:0"``).

    Returns:
        dict: TF name -> HF key. Encoder/stem keys are prefixed with
        ``"efficientnet."``; the classifier head maps to
        ``classifier.weight`` / ``classifier.bias``.
    """
    # Collect block suffixes ("1a", "2b", ...) and number them 0..N-1 in
    # sorted order to form the HF encoder block indices.
    block_names = [v.split('_' )[0].split('block' )[1] for v in original_param_names if v.startswith('block' )]
    block_names = sorted(set(block_names ) )
    num_blocks = len(block_names )
    block_name_mapping = {b: str(i ) for b, i in zip(block_names , range(num_blocks ) )}

    rename_keys = []
    # Stem: conv + batchnorm.
    rename_keys.append(('stem_conv/kernel:0', 'embeddings.convolution.weight') )
    rename_keys.append(('stem_bn/gamma:0', 'embeddings.batchnorm.weight') )
    rename_keys.append(('stem_bn/beta:0', 'embeddings.batchnorm.bias') )
    rename_keys.append(('stem_bn/moving_mean:0', 'embeddings.batchnorm.running_mean') )
    rename_keys.append(('stem_bn/moving_variance:0', 'embeddings.batchnorm.running_var') )
    # Per-block: expansion, depthwise conv, squeeze-excite, projection.
    for b in block_names:
        hf_b = block_name_mapping[b]
        rename_keys.append((f'block{b}_expand_conv/kernel:0', f'encoder.blocks.{hf_b}.expansion.expand_conv.weight') )
        rename_keys.append((f'block{b}_expand_bn/gamma:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.weight') )
        rename_keys.append((f'block{b}_expand_bn/beta:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.bias') )
        rename_keys.append(
            (f'block{b}_expand_bn/moving_mean:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.running_mean') )
        rename_keys.append(
            (f'block{b}_expand_bn/moving_variance:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.running_var') )
        rename_keys.append(
            (f'block{b}_dwconv/depthwise_kernel:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight') )
        rename_keys.append((f'block{b}_bn/gamma:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight') )
        rename_keys.append((f'block{b}_bn/beta:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias') )
        rename_keys.append(
            (f'block{b}_bn/moving_mean:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean') )
        rename_keys.append(
            (f'block{b}_bn/moving_variance:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var') )
        rename_keys.append((f'block{b}_se_reduce/kernel:0', f'encoder.blocks.{hf_b}.squeeze_excite.reduce.weight') )
        rename_keys.append((f'block{b}_se_reduce/bias:0', f'encoder.blocks.{hf_b}.squeeze_excite.reduce.bias') )
        rename_keys.append((f'block{b}_se_expand/kernel:0', f'encoder.blocks.{hf_b}.squeeze_excite.expand.weight') )
        rename_keys.append((f'block{b}_se_expand/bias:0', f'encoder.blocks.{hf_b}.squeeze_excite.expand.bias') )
        rename_keys.append(
            (f'block{b}_project_conv/kernel:0', f'encoder.blocks.{hf_b}.projection.project_conv.weight') )
        rename_keys.append((f'block{b}_project_bn/gamma:0', f'encoder.blocks.{hf_b}.projection.project_bn.weight') )
        rename_keys.append((f'block{b}_project_bn/beta:0', f'encoder.blocks.{hf_b}.projection.project_bn.bias') )
        rename_keys.append(
            (f'block{b}_project_bn/moving_mean:0', f'encoder.blocks.{hf_b}.projection.project_bn.running_mean') )
        rename_keys.append(
            (f'block{b}_project_bn/moving_variance:0', f'encoder.blocks.{hf_b}.projection.project_bn.running_var') )
    # Top: conv + batchnorm before the classifier.
    rename_keys.append(('top_conv/kernel:0', 'encoder.top_conv.weight') )
    rename_keys.append(('top_bn/gamma:0', 'encoder.top_bn.weight') )
    rename_keys.append(('top_bn/beta:0', 'encoder.top_bn.bias') )
    rename_keys.append(('top_bn/moving_mean:0', 'encoder.top_bn.running_mean') )
    rename_keys.append(('top_bn/moving_variance:0', 'encoder.top_bn.running_var') )

    # Keep only names present in the actual TF model, prefixing the backbone.
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = 'efficientnet.' + item[1]
    # Classifier head (restored from the upstream script's TF names).
    key_mapping['predictions/kernel:0'] = 'classifier.weight'
    key_mapping['predictions/bias:0'] = 'classifier.bias'
    return key_mapping
def lowercase (hf_params , tf_params , key_mapping ):
    """Copy TF weights into the HF model's parameter dict in place.

    Args:
        hf_params: the HF model's state dict (modified in place via copy_).
        tf_params: dict of TF variable name -> numpy array.
        key_mapping: TF name -> HF key, as produced by the rename function.

    Keys containing "normalization" have no HF counterpart in the mapping
    and are skipped. Kernel layouts are permuted from the TF convention to
    PyTorch's: conv HWIO -> OIHW, depthwise kernels with a separate permute,
    dense kernels by plain transpose.
    """
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            # conv kernel: HWIO -> OIHW
            new_hf_value = torch.from_numpy(value ).permute(3 , 2 , 0 , 1 )
        elif "depthwise_kernel" in key:
            # depthwise kernel: move the multiplier/channel axes first
            new_hf_value = torch.from_numpy(value ).permute(2 , 3 , 0 , 1 )
        elif "kernel" in key:
            # dense kernel: (in, out) -> (out, in)
            new_hf_value = torch.from_numpy(np.transpose(value ) )
        else:
            new_hf_value = torch.from_numpy(value )
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value )
@torch.no_grad()
def lowercase(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    """Convert a TF EfficientNet checkpoint to HF PyTorch, verify logits, then save/push.

    Args:
        model_name: EfficientNet variant key (e.g. ``"b0"``) into ``model_classes``/``CONFIG_MAP``.
        pytorch_dump_folder_path: output directory for the converted model.
        save_model: if truthy, save model + image processor locally.
        push_to_hub: if truthy, push model + image processor to the Hub.
    """
    # Instantiate the original Keras model with pretrained ImageNet weights.
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )
    # Gather all TF parameters (trainable + non-trainable) as numpy arrays.
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1E-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        repo_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(repo_name)
        hf_model.push_to_hub(repo_name)
if __name__ == "__main__":
    # CLI entry point: parse conversion options and run the checkpoint converter.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="b0",
        type=str,
        help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="hf_model",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--save_model", action="store_true", help="Save model to local")
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")

    args = parser.parse_args()
    convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 719 |
"""Lazy-import init for the TrOCR model family (configuration, processor, modeling)."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)

# Mapping from submodule name to its public symbols, consumed by _LazyModule.
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch-only modeling symbols are simply omitted when torch is absent.
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static imports for type checkers only; at runtime the lazy module is used.
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 630 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase : Dict = logging.get_logger(__name__)
lowerCAmelCase : Tuple = {
"""microsoft/beit-base-patch16-224-pt22k""": (
"""https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"""
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class UpperCamelCase__ ( _a ):
    """Configuration class for BEiT models.

    Defaults correspond to the ``microsoft/beit-base-patch16-224-pt22k``
    architecture. The original parameter list was mangled into duplicate
    ``snake_case__`` names (a SyntaxError); names below are restored from the
    attribute assignments in the body.
    """

    __magic_name__ = """beit"""

    def __init__(
        self,
        vocab_size=8192,  # size of the visual-token codebook used for masked image modeling
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class UpperCamelCase__ ( _a ):
    """ONNX export configuration for BEiT.

    Note: both properties are named ``a`` (mangled), so the second definition
    shadows the first — preserved here exactly as in the original.
    """

    __magic_name__ = version.parse("1.11")

    @property
    def a ( self ):
        """Axis names for the single ``pixel_values`` input (NCHW layout)."""
        pixel_axes = {0: "batch", 1: "num_channels", 2: "height", 3: "width"}
        return OrderedDict([("pixel_values", pixel_axes)])

    @property
    def a ( self ):
        """Absolute tolerance used when validating exported model outputs."""
        return 1E-4
| 720 |
'''simple docstring'''
from collections import Counter
from timeit import timeit
def lowercase (_A = "" , ):
"""simple docstring"""
return sum(c % 2 for c in Counter(input_str.replace(' ' , '' ).lower() ).values() ) < 2
def lowercase (_A = "" ):
"""simple docstring"""
if len(_A ) == 0:
return True
_lowerCAmelCase : Union[str, Any] = input_str.replace(' ' , '' ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
_lowerCAmelCase : dict[str, int] = {}
for character in lower_case_input_str:
_lowerCAmelCase : Union[str, Any] = character_freq_dict.get(_A , 0 ) + 1
_lowerCAmelCase : List[Any] = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def lowercase (_A = "" ):
    """Print the answer and wall-clock timing of both palindrome checks for *_A*.

    NOTE(review): the ``timeit`` snippets import ``__main__ as z`` and read
    ``z.check_str`` plus the original (unmangled) function names, so this only
    works when run as a script where those module-level names exist — confirm
    before reuse; in this mangled file those names are never defined.
    """
    print('\nFor string = ' , _A , ':' )
    print(
        '> can_string_be_rearranged_as_palindrome_counter()' , '\tans =' , can_string_be_rearranged_as_palindrome_counter(_A ) , '\ttime =' , timeit(
            'z.can_string_be_rearranged_as_palindrome_counter(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
    print(
        '> can_string_be_rearranged_as_palindrome()' , '\tans =' , can_string_be_rearranged_as_palindrome(_A ) , '\ttime =' , timeit(
            'z.can_string_be_rearranged_as_palindrome(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
if __name__ == "__main__":
    # NOTE(review): the two assignments below bind to `lowerCAmelCase` but later
    # lines read `check_str` and `status` — presumably those were the original
    # names before mangling. Left byte-identical for review; as written this
    # raises NameError at runtime.
    lowerCAmelCase : Tuple = input(
        """Enter string to determine if it can be rearranged as a palindrome or not: """
    ).strip()
    benchmark(check_str)
    lowerCAmelCase : Optional[Any] = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(F'''{check_str} can {"" if status else "not "}be rearranged as a palindrome''')
| 630 | 0 |
'''simple docstring'''
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
    """Byte-level (ByT5-style) tokenizer: text is tokenized into UTF-8 bytes.

    Vocabulary layout: pad=0, eos=1, unk=2, then the 256 byte values, then
    ``extra_ids`` sentinel tokens at the top of the id space.
    NOTE(review): identifiers (``snake_case__``, ``_lowerCAmelCase``,
    ``_lowercase``, all methods named ``a``) appear machine-mangled; several
    names are read that are never bound under that name, so this class does
    not run as written — verify against the upstream ByT5Tokenizer.
    """
    __magic_name__ = ["input_ids", "attention_mask"]
    def __init__( self , snake_case__="</s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__=125 , snake_case__=None , **snake_case__ , ):
        '''Set up special tokens, the byte vocabulary and extra-id sentinels.'''
        # Positionally: eos_token, unk_token, pad_token, extra_ids, additional_special_tokens.
        if extra_ids > 0 and additional_special_tokens is None:
            _lowerCAmelCase : Dict = [F'<extra_id_{i}>' for i in range(_lowercase )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            _lowerCAmelCase : Optional[Any] = len(set(filter(lambda snake_case__ : bool('extra_id' in str(_lowercase ) ) , _lowercase ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    F'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
                    ' provided to ByT5Tokenizer. In this case the additional_special_tokens must include the'
                    ' extra_ids tokens' )
        # Wrap bare-string special tokens in AddedToken so stripping behaviour is explicit.
        _lowerCAmelCase : List[Any] = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else pad_token
        _lowerCAmelCase : Dict = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else eos_token
        _lowerCAmelCase : str = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else unk_token
        super().__init__(
            eos_token=_lowercase , unk_token=_lowercase , pad_token=_lowercase , extra_ids=_lowercase , additional_special_tokens=_lowercase , **_lowercase , )
        _lowerCAmelCase : List[str] = extra_ids
        _lowerCAmelCase : Tuple = 2**8 # utf is 8 bits
        # define special tokens dict
        _lowerCAmelCase : Dict[int, str] = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        _lowerCAmelCase : int = len(self.special_tokens_encoder )
        _lowerCAmelCase : Tuple = len(_lowercase )
        for i, token in enumerate(_lowercase ):
            # Extra-id sentinels occupy the top of the id space.
            _lowerCAmelCase : List[str] = self.vocab_size + i - n
        _lowerCAmelCase : Dict[str, int] = {v: k for k, v in self.special_tokens_encoder.items()}
    @property
    def a ( self ):
        '''Total vocabulary size: 256 byte tokens + special tokens + extra ids.'''
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
    def a ( self , snake_case__ , snake_case__ = None , snake_case__ = False ):
        '''Return a 0/1 mask with 1 at special-token (eos) positions.'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=_lowercase , token_ids_a=_lowercase , already_has_special_tokens=_lowercase )
        # normal case: some special tokens
        if token_ids_a is None:
            return ([0] * len(_lowercase )) + [1]
        return ([0] * len(_lowercase )) + [1] + ([0] * len(_lowercase )) + [1]
    def a ( self , snake_case__ ):
        '''Append the eos id unless the sequence already ends with it (then warn).'''
        if len(_lowercase ) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                F'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
                ' eos tokens being added.' )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]
    def a ( self , snake_case__ , snake_case__ = None ):
        '''All-zero token type ids (this tokenizer has no segment embeddings).'''
        _lowerCAmelCase : Optional[int] = [self.eos_token_id]
        if token_ids_a is None:
            return len(token_ids_a + eos ) * [0]
        return len(token_ids_a + eos + token_ids_a + eos ) * [0]
    def a ( self , snake_case__ , snake_case__ = None ):
        '''Build model inputs by eos-terminating one or two sequences.'''
        _lowerCAmelCase : List[str] = self._add_eos_if_not_present(_lowercase )
        if token_ids_a is None:
            return token_ids_a
        else:
            _lowerCAmelCase : List[Any] = self._add_eos_if_not_present(_lowercase )
            return token_ids_a + token_ids_a
    def a ( self , snake_case__ ):
        '''Tokenize text into one character per UTF-8 byte.'''
        _lowerCAmelCase : int = [chr(_lowercase ) for i in text.encode('utf-8' )]
        return tokens
    def a ( self , snake_case__ ):
        '''Convert a token to its id: specials, added tokens, then raw byte value.'''
        if token in self.special_tokens_encoder:
            _lowerCAmelCase : Tuple = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            _lowerCAmelCase : List[Any] = self.added_tokens_encoder[token]
        elif len(_lowercase ) != 1:
            # Multi-character strings cannot be single-byte tokens.
            _lowerCAmelCase : List[Any] = self.unk_token_id
        else:
            _lowerCAmelCase : List[Any] = ord(_lowercase ) + self._num_special_tokens
        return token_id
    def a ( self , snake_case__ ):
        '''Convert an id back to its token string (special or raw byte char).'''
        if index in self.special_tokens_decoder:
            _lowerCAmelCase : Dict = self.special_tokens_decoder[index]
        else:
            _lowerCAmelCase : Any = chr(index - self._num_special_tokens )
        return token
    def a ( self , snake_case__ ):
        '''Join byte tokens back into a UTF-8 string; undecodable bytes are ignored.'''
        _lowerCAmelCase : List[str] = b""""""
        for token in tokens:
            if token in self.special_tokens_decoder:
                _lowerCAmelCase : List[Any] = self.special_tokens_decoder[token].encode('utf-8' )
            elif token in self.added_tokens_decoder:
                _lowerCAmelCase : Optional[Any] = self.special_tokens_decoder[token].encode('utf-8' )
            elif token in self.special_tokens_encoder:
                _lowerCAmelCase : Optional[Any] = token.encode('utf-8' )
            elif token in self.added_tokens_encoder:
                _lowerCAmelCase : Optional[int] = token.encode('utf-8' )
            else:
                _lowerCAmelCase : Optional[Any] = bytes([ord(_lowercase )] )
            bstring += tok_string
        _lowerCAmelCase : List[Any] = bstring.decode('utf-8' , errors='ignore' )
        return string
    def a ( self , snake_case__ , snake_case__ = None ):
        '''A byte tokenizer has no vocabulary file to save; return an empty tuple.'''
        return ()
| 721 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase : str = logging.get_logger(__name__)
lowerCAmelCase : int = {
"""facebook/data2vec-text-base""": """https://huggingface.co/data2vec/resolve/main/config.json""",
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """Configuration class for Data2Vec text models (RoBERTa-style defaults).

    The original parameter list was mangled into duplicate ``snake_case__``
    names (a SyntaxError); names below are restored from the attribute
    assignments in the body.
    """

    __magic_name__ = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """ONNX export configuration for Data2Vec text models."""

    @property
    def a ( self ):
        """Dynamic-axis spec for `input_ids`/`attention_mask`.

        Multiple-choice tasks add a `choice` axis between batch and sequence.
        The original body assigned to a mangled local and then read the
        undefined name `dynamic_axis` (NameError); fixed here.
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
| 630 | 0 |
'''simple docstring'''
import numpy as np
def lowercase(f, ya, xa, h, x_end):
    """Integrate y' = f(x, y) with the classical 4th-order Runge-Kutta method.

    Args:
        f: right-hand side of the ODE, called as ``f(x, y)``.
        ya: initial value y(xa).
        xa: initial abscissa.
        h: step size.
        x_end: final abscissa; ``ceil((x_end - xa) / h)`` steps are taken.

    Returns:
        numpy array of length n+1 holding the approximated y values,
        with ``y[0] == ya``.
    """
    # NOTE(review): parameter order reconstructed from the mangled body
    # (f, y0, x0, h, x_end in the upstream algorithm) — confirm against callers.
    n = int(np.ceil((x_end - xa) / h))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        # Four RK4 slope evaluations: start, two midpoints, end of the step.
        ka = f(x, y[k])
        kb = f(x + 0.5 * h, y[k] + 0.5 * h * ka)
        kc = f(x + 0.5 * h, y[k] + 0.5 * h * kb)
        kd = f(x + h, y[k] + h * kc)
        y[k + 1] = y[k] + (1 / 6) * h * (ka + 2 * kb + 2 * kc + kd)
        x += h
    return y
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 700 |
'''simple docstring'''
import pytest
import datasets
# Import fixture modules as plugins
lowerCAmelCase : List[str] = ["""tests.fixtures.files""", """tests.fixtures.hub""", """tests.fixtures.fsspec"""]
def lowercase(config, items):
    """Pytest hook (collection_modifyitems): default-mark unmarked tests as unit tests.

    Tests already carrying an ``integration`` or ``unit`` marker are left
    untouched; every other collected item gets ``pytest.mark.unit``. The
    original signature used duplicate ``_A`` parameters (a SyntaxError).
    """
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)
def lowercase(config):
    """Pytest hook (configure): register the custom ``torchaudio_latest`` marker."""
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")
@pytest.fixture(autouse=True)
def lowercase(tmp_path_factory, monkeypatch):
    """Autouse fixture: redirect all `datasets` cache paths into a temp directory.

    Prevents tests from touching (or depending on) the user's real HF caches.
    The original decorator passed the undefined name ``_A`` as ``autouse`` and
    bound every local to the same mangled name; both are fixed here.
    """
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    # Downloads (and their extracted versions) live under the datasets cache.
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))
@pytest.fixture(autouse=True, scope="session")
def lowercase():
    """Session-wide autouse fixture: silence `datasets` progress bars during tests."""
    datasets.disable_progress_bar()
@pytest.fixture(autouse=True)
def lowercase(monkeypatch):
    """Autouse fixture: keep test runs from reporting download counts to the Hub."""
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)
@pytest.fixture
def lowercase(monkeypatch):
    """Opt-in fixture: silence SQLAlchemy 2.0's deprecation 'uber warning'."""
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
| 630 | 0 |
"""Lazy-import init for the NLLB-MoE model family (configuration + modeling)."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Mapping from submodule name to its public symbols, consumed by _LazyModule.
_import_structure = {
    "configuration_nllb_moe": [
        "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "NllbMoeConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch-only modeling symbols are simply omitted when torch is absent.
    pass
else:
    _import_structure["modeling_nllb_moe"] = [
        "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NllbMoeForConditionalGeneration",
        "NllbMoeModel",
        "NllbMoePreTrainedModel",
        "NllbMoeTop2Router",
        "NllbMoeSparseMLP",
    ]

if TYPE_CHECKING:
    # Static imports for type checkers only; at runtime the lazy module is used.
    from .configuration_nllb_moe import (
        NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
        NllbMoeConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nllb_moe import (
            NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
            NllbMoeForConditionalGeneration,
            NllbMoeModel,
            NllbMoePreTrainedModel,
            NllbMoeSparseMLP,
            NllbMoeTop2Router,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 701 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
lowerCAmelCase : str = logging.get_logger(__name__)
# General docstring
lowerCAmelCase : Optional[Any] = """RegNetConfig"""
# Base docstring
lowerCAmelCase : int = """facebook/regnet-y-040"""
lowerCAmelCase : Optional[Any] = [1, 10_88, 7, 7]
# Image classification docstring
lowerCAmelCase : Any = """facebook/regnet-y-040"""
lowerCAmelCase : Optional[Any] = """tabby, tabby cat"""
lowerCAmelCase : Tuple = [
"""facebook/regnet-y-040""",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class UpperCamelCase__ ( tf.keras.layers.Layer ):
    """Conv + BatchNorm + activation block (RegNet "conv layer").

    NOTE(review): identifiers are machine-mangled — `ZeroPaddingaD`/`ConvaD`
    are presumably `ZeroPadding2D`/`Conv2D`, `ACTaFN` is `ACT2FN`, and the
    repeated `_lowerCAmelCase` locals were presumably distinct attribute
    assignments. As written this does not run; verify against upstream
    modeling_tf_regnet.py.
    """
    def __init__( self , snake_case__ , snake_case__ = 3 , snake_case__ = 1 , snake_case__ = 1 , snake_case__ = "relu" , **snake_case__ , ):
        '''Build padding, convolution, batch-norm and activation sub-layers.'''
        super().__init__(**snake_case__ )
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        _lowerCAmelCase : Optional[Any] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
        _lowerCAmelCase : List[Any] = tf.keras.layers.ConvaD(
            filters=snake_case__ , kernel_size=snake_case__ , strides=snake_case__ , padding='VALID' , groups=snake_case__ , use_bias=snake_case__ , name='convolution' , )
        _lowerCAmelCase : List[Any] = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' )
        _lowerCAmelCase : int = ACTaFN[activation] if activation is not None else tf.identity
    def a ( self , snake_case__ ):
        '''Forward pass: pad -> conv -> batch-norm -> activation.'''
        _lowerCAmelCase : Any = self.convolution(self.padding(snake_case__ ) )
        _lowerCAmelCase : Union[str, Any] = self.normalization(snake_case__ )
        _lowerCAmelCase : int = self.activation(snake_case__ )
        return hidden_state
class UpperCamelCase__ ( tf.keras.layers.Layer ):
    """RegNet stem/embedding: validates channel count, converts NCHW->NHWC, applies the stem conv.

    NOTE(review): locals are machine-mangled (`_lowerCAmelCase` rebound
    repeatedly, param names collapsed to `snake_case__`); presumably distinct
    names were intended — verify against upstream modeling_tf_regnet.py.
    """
    def __init__( self , snake_case__ , **snake_case__ ):
        '''Store the expected channel count and build the stem conv from the config.'''
        super().__init__(**snake_case__ )
        _lowerCAmelCase : str = config.num_channels
        _lowerCAmelCase : List[Any] = TFRegNetConvLayer(
            out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='embedder' , )
    def a ( self , snake_case__ ):
        '''Check channels, transpose NCHW pixel values to NHWC, and embed.'''
        _lowerCAmelCase : Optional[Any] = shape_list(snake_case__ )[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        _lowerCAmelCase : List[Any] = tf.transpose(snake_case__ , perm=(0, 2, 3, 1) )
        _lowerCAmelCase : Tuple = self.embedder(snake_case__ )
        return hidden_state
class UpperCamelCase__ ( tf.keras.layers.Layer ):
    """1x1 conv + batch-norm shortcut used to match residual dimensions/stride.

    NOTE(review): `ConvaD` is presumably `Conv2D` and the `_lowerCAmelCase`
    targets were presumably `self.convolution`/`self.normalization` (the
    forward method reads those attributes) — verify against upstream.
    """
    def __init__( self , snake_case__ , snake_case__ = 2 , **snake_case__ ):
        '''Build the 1x1 projection conv and its batch norm.'''
        super().__init__(**snake_case__ )
        _lowerCAmelCase : Union[str, Any] = tf.keras.layers.ConvaD(
            filters=snake_case__ , kernel_size=1 , strides=snake_case__ , use_bias=snake_case__ , name='convolution' )
        _lowerCAmelCase : Tuple = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' )
    def a ( self , snake_case__ , snake_case__ = False ):
        '''Forward: conv then batch-norm (training flag forwarded to BN).'''
        return self.normalization(self.convolution(snake_case__ ) , training=snake_case__ )
class UpperCamelCase__ ( tf.keras.layers.Layer ):
    """Squeeze-and-excitation block: global pool -> reduce/expand convs -> channel gating.

    NOTE(review): `GlobalAveragePoolingaD`/`ConvaD` are presumably
    `GlobalAveragePooling2D`/`Conv2D`, and the `_lowerCAmelCase` locals were
    presumably `self.pooler`/`self.attention` plus distinct forward locals —
    verify against upstream modeling_tf_regnet.py.
    """
    def __init__( self , snake_case__ , snake_case__ , **snake_case__ ):
        '''Build the keep-dims pooler and the two attention convs (relu reduce, sigmoid expand).'''
        super().__init__(**snake_case__ )
        _lowerCAmelCase : Any = tf.keras.layers.GlobalAveragePoolingaD(keepdims=snake_case__ , name='pooler' )
        _lowerCAmelCase : str = [
            tf.keras.layers.ConvaD(filters=snake_case__ , kernel_size=1 , activation='relu' , name='attention.0' ),
            tf.keras.layers.ConvaD(filters=snake_case__ , kernel_size=1 , activation='sigmoid' , name='attention.2' ),
        ]
    def a ( self , snake_case__ ):
        '''Forward: pool, run attention convs, scale the input channel-wise.'''
        _lowerCAmelCase : Any = self.pooler(snake_case__ )
        for layer_module in self.attention:
            _lowerCAmelCase : Tuple = layer_module(snake_case__ )
        _lowerCAmelCase : Optional[Any] = hidden_state * pooled
        return hidden_state
class UpperCamelCase__ ( tf.keras.layers.Layer ):
    """RegNet X residual layer: 1x1 -> grouped 3x3 -> 1x1 convs plus a shortcut.

    NOTE(review): locals are machine-mangled (`_lowerCAmelCase`), so the
    residual wiring below reads names that are never bound — verify against
    upstream modeling_tf_regnet.py before relying on this.
    """
    def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = 1 , **snake_case__ ):
        '''Build shortcut (projection conv or identity) and the three-conv stack.'''
        super().__init__(**snake_case__ )
        # Shortcut is a projection only when shape or stride changes.
        _lowerCAmelCase : Optional[int] = in_channels != out_channels or stride != 1
        _lowerCAmelCase : Union[str, Any] = max(1 , out_channels // config.groups_width )
        _lowerCAmelCase : Optional[Any] = (
            TFRegNetShortCut(snake_case__ , stride=snake_case__ , name='shortcut' )
            if should_apply_shortcut
            else tf.keras.layers.Activation('linear' , name='shortcut' )
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        _lowerCAmelCase : Any = [
            TFRegNetConvLayer(snake_case__ , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
            TFRegNetConvLayer(
                snake_case__ , stride=snake_case__ , groups=snake_case__ , activation=config.hidden_act , name='layer.1' ),
            TFRegNetConvLayer(snake_case__ , kernel_size=1 , activation=snake_case__ , name='layer.2' ),
        ]
        _lowerCAmelCase : List[str] = ACTaFN[config.hidden_act]
    def a ( self , snake_case__ ):
        '''Forward: conv stack, add the shortcut of the input, final activation.'''
        _lowerCAmelCase : Optional[Any] = hidden_state
        for layer_module in self.layers:
            _lowerCAmelCase : int = layer_module(snake_case__ )
        _lowerCAmelCase : int = self.shortcut(snake_case__ )
        hidden_state += residual
        _lowerCAmelCase : Tuple = self.activation(snake_case__ )
        return hidden_state
class UpperCamelCase__ ( tf.keras.layers.Layer ):
    """RegNet Y residual layer: like the X layer but with a squeeze-excite block inserted.

    NOTE(review): locals are machine-mangled (`_lowerCAmelCase`), so the
    residual wiring below reads names that are never bound — verify against
    upstream modeling_tf_regnet.py before relying on this.
    """
    def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = 1 , **snake_case__ ):
        '''Build shortcut (projection conv or identity), conv stack, and SE layer.'''
        super().__init__(**snake_case__ )
        # Shortcut is a projection only when shape or stride changes.
        _lowerCAmelCase : List[str] = in_channels != out_channels or stride != 1
        _lowerCAmelCase : Union[str, Any] = max(1 , out_channels // config.groups_width )
        _lowerCAmelCase : Optional[Any] = (
            TFRegNetShortCut(snake_case__ , stride=snake_case__ , name='shortcut' )
            if should_apply_shortcut
            else tf.keras.layers.Activation('linear' , name='shortcut' )
        )
        _lowerCAmelCase : Tuple = [
            TFRegNetConvLayer(snake_case__ , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
            TFRegNetConvLayer(
                snake_case__ , stride=snake_case__ , groups=snake_case__ , activation=config.hidden_act , name='layer.1' ),
            TFRegNetSELayer(snake_case__ , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ),
            TFRegNetConvLayer(snake_case__ , kernel_size=1 , activation=snake_case__ , name='layer.3' ),
        ]
        _lowerCAmelCase : Tuple = ACTaFN[config.hidden_act]
    def a ( self , snake_case__ ):
        '''Forward: conv/SE stack, add the shortcut of the input, final activation.'''
        _lowerCAmelCase : Dict = hidden_state
        for layer_module in self.layers:
            _lowerCAmelCase : List[Any] = layer_module(snake_case__ )
        _lowerCAmelCase : Tuple = self.shortcut(snake_case__ )
        hidden_state += residual
        _lowerCAmelCase : str = self.activation(snake_case__ )
        return hidden_state
class UpperCamelCase__ ( tf.keras.layers.Layer ):
    """A RegNet stage: one downsampling X/Y layer followed by depth-1 unit-stride layers.

    NOTE(review): locals are machine-mangled (`layer`/`depth` are read but the
    bindings go to `_lowerCAmelCase`); verify against upstream modeling_tf_regnet.py.
    """
    def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = 2 , snake_case__ = 2 , **snake_case__ ):
        '''Pick the layer class from config.layer_type ("x" or "y") and build `depth` layers.'''
        super().__init__(**snake_case__ )
        _lowerCAmelCase : Dict = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer
        _lowerCAmelCase : Optional[int] = [
            # downsampling is done in the first layer with stride of 2
            layer(snake_case__ , snake_case__ , snake_case__ , stride=snake_case__ , name='layers.0' ),
            *[layer(snake_case__ , snake_case__ , snake_case__ , name=F'layers.{i+1}' ) for i in range(depth - 1 )],
        ]
    def a ( self , snake_case__ ):
        '''Forward: run the input through every layer in the stage.'''
        for layer_module in self.layers:
            _lowerCAmelCase : int = layer_module(snake_case__ )
        return hidden_state
class UpperCamelCase__ ( tf.keras.layers.Layer ):
    """Stack of RegNet stages; optionally collects per-stage hidden states.

    NOTE(review): `self.stages` is appended to but never initialised under that
    name (the list binding goes to a mangled local) — verify against upstream
    modeling_tf_regnet.py.
    """
    def __init__( self , snake_case__ , **snake_case__ ):
        '''Build all stages from the config; the first stage may or may not downsample.'''
        super().__init__(**snake_case__ )
        _lowerCAmelCase : str = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                snake_case__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) )
        # Remaining stages chain consecutive hidden sizes with their configured depths.
        _lowerCAmelCase : Union[str, Any] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
        for i, ((in_channels, out_channels), depth) in enumerate(zip(snake_case__ , config.depths[1:] ) ):
            self.stages.append(TFRegNetStage(snake_case__ , snake_case__ , snake_case__ , depth=snake_case__ , name=F'stages.{i+1}' ) )
    def a ( self , snake_case__ , snake_case__ = False , snake_case__ = True ):
        '''Forward through all stages; returns a tuple or TFBaseModelOutputWithNoAttention.'''
        _lowerCAmelCase : List[Any] = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                _lowerCAmelCase : str = hidden_states + (hidden_state,)
            _lowerCAmelCase : List[str] = stage_module(snake_case__ )
        if output_hidden_states:
            _lowerCAmelCase : Dict = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None )
        return TFBaseModelOutputWithNoAttention(last_hidden_state=snake_case__ , hidden_states=snake_case__ )
@keras_serializable
class UpperCamelCase__ ( tf.keras.layers.Layer ):
    """Core RegNet model: embedder -> encoder -> global pool; returns NCHW tensors.

    NOTE(review): identifiers are machine-mangled (`GlobalAveragePoolingaD`
    presumably `GlobalAveragePooling2D`; `_lowerCAmelCase` locals presumably
    distinct names like `last_hidden_state`/`pooled_output`) — verify against
    upstream modeling_tf_regnet.py.
    """
    # Presumably `config_class` before mangling.
    __magic_name__ = RegNetConfig
    def __init__( self , snake_case__ , **snake_case__ ):
        '''Build embedder, encoder and the keep-dims average pooler.'''
        super().__init__(**snake_case__ )
        _lowerCAmelCase : Union[str, Any] = config
        _lowerCAmelCase : Union[str, Any] = TFRegNetEmbeddings(snake_case__ , name='embedder' )
        _lowerCAmelCase : Optional[int] = TFRegNetEncoder(snake_case__ , name='encoder' )
        _lowerCAmelCase : Dict = tf.keras.layers.GlobalAveragePoolingaD(keepdims=snake_case__ , name='pooler' )
    @unpack_inputs
    def a ( self , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = False , ):
        '''Forward pass; converts NHWC intermediate results back to NCHW before returning.'''
        _lowerCAmelCase : Tuple = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        _lowerCAmelCase : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
        _lowerCAmelCase : int = self.embedder(snake_case__ , training=snake_case__ )
        _lowerCAmelCase : List[str] = self.encoder(
            snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ , training=snake_case__ )
        _lowerCAmelCase : List[Any] = encoder_outputs[0]
        _lowerCAmelCase : Tuple = self.pooler(snake_case__ )
        # Change to NCHW output format have uniformity in the modules
        _lowerCAmelCase : Optional[int] = tf.transpose(snake_case__ , perm=(0, 3, 1, 2) )
        _lowerCAmelCase : Optional[Any] = tf.transpose(snake_case__ , perm=(0, 3, 1, 2) )
        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            _lowerCAmelCase : Union[str, Any] = tuple([tf.transpose(snake_case__ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=snake_case__ , pooler_output=snake_case__ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """Pretrained-model base class for RegNet: config class, model prefix and serving signature.

    NOTE(review): ``__magic_name__`` is assigned three times — each binding
    clobbers the previous one; the originals were presumably ``config_class``,
    ``base_model_prefix`` and ``main_input_name`` (identifiers look mangled).
    """
    __magic_name__ = RegNetConfig
    __magic_name__ = "regnet"
    __magic_name__ = "pixel_values"
    @property
    def a ( self ):
        '''Serving input signature: pixel_values of shape (batch, num_channels, 224, 224).'''
        # NOTE(review): ``tf.floataa`` is undefined — presumably a garbling of
        # ``tf.float32``; confirm before use.
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
lowerCAmelCase : List[Any] = r"""
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
lowerCAmelCase : Dict = r"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConveNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top." , SCREAMING_SNAKE_CASE_ , )
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """Bare TF RegNet model (no task head); wraps the main layer under the ``regnet`` name.

    NOTE(review): signatures below repeat the parameter name ``snake_case__``
    (a SyntaxError) and the body reads ``output_hidden_states``,
    ``return_dict`` and ``outputs`` which are never bound (assignments target
    ``_lowerCAmelCase``). Identifiers appear machine-mangled.
    """
    def __init__( self , snake_case__ , *snake_case__ , **snake_case__ ):
        '''Instantiate the shared main layer.'''
        super().__init__(snake_case__ , *snake_case__ , **snake_case__ )
        _lowerCAmelCase : List[str] = TFRegNetMainLayer(snake_case__ , name='regnet' )
    @unpack_inputs
    @add_start_docstrings_to_model_forward(snake_case__ )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=snake_case__ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def a ( self , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__=False , ):
        '''Forward pass delegating to the main layer; returns tuple or model-output object.'''
        _lowerCAmelCase : Optional[Any] = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        _lowerCAmelCase : Any = return_dict if return_dict is not None else self.config.use_return_dict
        _lowerCAmelCase : str = self.regnet(
            pixel_values=snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ , training=snake_case__ , )
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
    "\n    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    " , SCREAMING_SNAKE_CASE_ , )
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
    """TF RegNet with an image-classification head (Flatten + Dense over pooled features).

    NOTE(review): identifier-mangled like the classes above — duplicate
    ``snake_case__`` parameters and unbound reads (``config``, ``outputs``,
    ``labels``, ``logits``, ``loss``, ``output``). Restore names before use.
    """
    def __init__( self , snake_case__ , *snake_case__ , **snake_case__ ):
        '''Build backbone plus a two-stage classifier head (identity head when num_labels == 0).'''
        super().__init__(snake_case__ , *snake_case__ , **snake_case__ )
        _lowerCAmelCase : Optional[Any] = config.num_labels
        _lowerCAmelCase : Optional[Any] = TFRegNetMainLayer(snake_case__ , name='regnet' )
        # classification head
        _lowerCAmelCase : Optional[int] = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity,
        ]
    @unpack_inputs
    @add_start_docstrings_to_model_forward(snake_case__ )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=snake_case__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def a ( self , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__=False , ):
        '''Forward pass: backbone -> pooled output -> classifier; optional labels give a loss.'''
        _lowerCAmelCase : int = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        _lowerCAmelCase : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
        _lowerCAmelCase : Dict = self.regnet(
            snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ , training=snake_case__ )
        _lowerCAmelCase : Optional[Any] = outputs.pooler_output if return_dict else outputs[1]
        _lowerCAmelCase : List[Any] = self.classifier[0](snake_case__ )
        _lowerCAmelCase : Tuple = self.classifier[1](snake_case__ )
        _lowerCAmelCase : int = None if labels is None else self.hf_compute_loss(labels=snake_case__ , logits=snake_case__ )
        if not return_dict:
            _lowerCAmelCase : str = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=snake_case__ , logits=snake_case__ , hidden_states=outputs.hidden_states )
| 630 | 0 |
'''simple docstring'''
import os
# Integer value of each Roman-numeral symbol.
# NOTE(review): the parser below reads a constant named ``SYMBOLS`` which is
# never defined — this dict is bound to ``lowerCAmelCase`` instead, so the
# lookup raises NameError as written; the name looks machine-mangled.
lowerCAmelCase : Dict = {"""I""": 1, """V""": 5, """X""": 10, """L""": 50, """C""": 1_00, """D""": 5_00, """M""": 10_00}
def lowercase (_A ):
    """Parse a Roman-numeral string into its integer value.

    Implements the subtractive rule: a symbol smaller than its successor is
    subtracted from the total (e.g. ``"IV"`` -> 4), otherwise it is added.

    Args:
        _A: Roman-numeral string made of the symbols I, V, X, L, C, D, M.

    Returns:
        The integer value; 0 for an empty string.

    Raises:
        KeyError: if the string contains a non-Roman character.
    """
    # Local symbol table: the original module constant's name was mangled
    # (bound to ``lowerCAmelCase``), so the function carries its own copy.
    symbols = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    if not _A:  # empty input would otherwise index past the end below
        return 0
    total_value = 0
    index = 0
    while index < len(_A) - 1:
        current_value = symbols[_A[index]]
        next_value = symbols[_A[index + 1]]
        if current_value < next_value:
            # Subtractive pair (IV, IX, XL, ...): subtract the leading symbol.
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    # The final symbol is always added.
    total_value += symbols[_A[index]]
    return total_value
def lowercase (_A ):
    """Convert a non-negative integer to its minimal Roman-numeral form.

    Handles thousands with repeated ``M`` and applies the subtractive forms
    (CM/CD, XC/XL, IX/IV) for hundreds, tens and units.

    Args:
        _A: non-negative integer (canonical output for values below 4000).

    Returns:
        The Roman-numeral string; empty string for 0.
    """
    numerals = ""
    num = _A
    # Thousands: simple repetition of "M".
    m_count = num // 1_0_0_0
    numerals += m_count * "M"
    num %= 1_0_0_0
    # Hundreds: 900 -> "CM", 400 -> "CD", then "D" and repeated "C".
    c_count = num // 1_0_0
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 1_0_0
    # Tens: 90 -> "XC", 40 -> "XL", then "L" and repeated "X".
    x_count = num // 1_0
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 1_0
    # Units: 9 -> "IX", 4 -> "IV", then "V" and repeated "I".
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals
def lowercase (_A = "/p089_roman.txt" ):
    """Project Euler 89: characters saved by rewriting each Roman numeral from
    the data file in minimal form (sum over lines of old length - new length).

    NOTE(review): not runnable as written — ``roman_numerals_filename``,
    ``parse_roman_numerals``, ``generate_roman_numerals``, ``lines`` and
    ``savings`` are never defined (all three helpers in this module are bound
    to the name ``lowercase``, and every local assignment targets
    ``_lowerCAmelCase``). The identifiers look machine-mangled and must be
    restored before this function can execute.
    """
    _lowerCAmelCase : Optional[int] = 0
    # Resolve the data file relative to this module's directory.
    with open(os.path.dirname(_A ) + roman_numerals_filename ) as filea:
        _lowerCAmelCase : int = filea.readlines()
    for line in lines:
        _lowerCAmelCase : str = line.strip()
        _lowerCAmelCase : Union[str, Any] = parse_roman_numerals(_A )
        _lowerCAmelCase : Tuple = generate_roman_numerals(_A )
        # Accumulate the length difference between original and minimal form.
        savings += len(_A ) - len(_A )
    return savings
if __name__ == "__main__":
    # NOTE(review): ``solution`` is never defined in this module — all three
    # functions above are bound to ``lowercase`` — so running this script
    # raises NameError until the original names are restored.
    print(F'''{solution() = }''')
| 702 |
'''simple docstring'''
from typing import Any
def lowercase (_A ):
    """Return the mode(s) of the input list as a sorted list.

    All values that occur with the maximum frequency are returned, sorted in
    ascending order. Works for any list of hashable, orderable values.

    Args:
        _A: list of values (may be empty).

    Returns:
        Sorted list of the most frequent value(s); ``[]`` for empty input.

    >>> lowercase([3, 1, 2, 2])
    [2]
    >>> lowercase([])
    []
    """
    if not _A:
        return []
    # Frequency of each element, aligned with the element positions.
    counts = [_A.count(value) for value in _A]
    max_count = max(counts)  # highest frequency in the input list
    # Collect every distinct value whose frequency equals the maximum.
    return sorted({_A[i] for i, count in enumerate(counts) if count == max_count})
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 630 | 0 |
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__ ( UpperCamelCase_ ):
    """Test-suite for ``PNDMScheduler`` (config round-trips, PRK/PLMS stepping,
    full denoising loops and expected result checksums).

    NOTE(review): this block is identifier-mangled and not runnable as written:
    several ``def`` signatures repeat the parameter name ``snake_case__`` (a
    SyntaxError), and the bodies read names that are never bound — ``__A``,
    ``config``, ``kwargs``, ``sample``, ``residual``, ``scheduler``,
    ``new_scheduler``, ``output``, ``new_output``, ``model``, ``output_a``,
    ``result_sum``, ``result_mean`` — because every assignment targets
    ``_lowerCAmelCase``. The docstrings below describe the evident intent; the
    original identifiers must be restored before the tests can run.
    """
    __magic_name__ = (PNDMScheduler,)
    __magic_name__ = (("num_inference_steps", 5_0),)
    def a ( self , **snake_case__ ):
        '''Return a default scheduler config, overridable via keyword arguments.'''
        _lowerCAmelCase : List[str] = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**__A )
        return config
    def a ( self , snake_case__=0 , **snake_case__ ):
        '''Check save_config/from_pretrained round-trip preserves PRK and PLMS step outputs.'''
        _lowerCAmelCase : Optional[int] = dict(self.forward_default_kwargs )
        _lowerCAmelCase : List[Any] = kwargs.pop('num_inference_steps' , __A )
        _lowerCAmelCase : Optional[int] = self.dummy_sample
        _lowerCAmelCase : List[Any] = 0.1 * sample
        _lowerCAmelCase : Dict = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            _lowerCAmelCase : str = self.get_scheduler_config(**__A )
            _lowerCAmelCase : int = scheduler_class(**__A )
            scheduler.set_timesteps(__A )
            # copy over dummy past residuals
            _lowerCAmelCase : Dict = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(__A )
                _lowerCAmelCase : Tuple = scheduler_class.from_pretrained(__A )
                new_scheduler.set_timesteps(__A )
                # copy over dummy past residuals
                _lowerCAmelCase : List[str] = dummy_past_residuals[:]
            _lowerCAmelCase : int = scheduler.step_prk(__A , __A , __A , **__A ).prev_sample
            _lowerCAmelCase : List[Any] = new_scheduler.step_prk(__A , __A , __A , **__A ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
            _lowerCAmelCase : Tuple = scheduler.step_plms(__A , __A , __A , **__A ).prev_sample
            _lowerCAmelCase : int = new_scheduler.step_plms(__A , __A , __A , **__A ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def a ( self ):
        '''Intentionally disabled base-class check (PNDM keeps internal state between steps).'''
        pass
    def a ( self , snake_case__=0 , **snake_case__ ):
        '''Like the round-trip check above, but residuals are restored after set_timesteps.'''
        _lowerCAmelCase : Optional[Any] = dict(self.forward_default_kwargs )
        _lowerCAmelCase : Optional[int] = kwargs.pop('num_inference_steps' , __A )
        _lowerCAmelCase : List[Any] = self.dummy_sample
        _lowerCAmelCase : List[Any] = 0.1 * sample
        _lowerCAmelCase : List[str] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            _lowerCAmelCase : List[str] = self.get_scheduler_config()
            _lowerCAmelCase : Union[str, Any] = scheduler_class(**__A )
            scheduler.set_timesteps(__A )
            # copy over dummy past residuals (must be after setting timesteps)
            _lowerCAmelCase : Tuple = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(__A )
                _lowerCAmelCase : Tuple = scheduler_class.from_pretrained(__A )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(__A )
                # copy over dummy past residual (must be after setting timesteps)
                _lowerCAmelCase : Optional[Any] = dummy_past_residuals[:]
            _lowerCAmelCase : List[str] = scheduler.step_prk(__A , __A , __A , **__A ).prev_sample
            _lowerCAmelCase : List[str] = new_scheduler.step_prk(__A , __A , __A , **__A ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
            _lowerCAmelCase : Tuple = scheduler.step_plms(__A , __A , __A , **__A ).prev_sample
            _lowerCAmelCase : Union[str, Any] = new_scheduler.step_plms(__A , __A , __A , **__A ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def a ( self , **snake_case__ ):
        '''Run a full 10-step denoising loop (PRK warmup then PLMS) and return the final sample.'''
        _lowerCAmelCase : Any = self.scheduler_classes[0]
        _lowerCAmelCase : List[str] = self.get_scheduler_config(**__A )
        _lowerCAmelCase : List[Any] = scheduler_class(**__A )
        _lowerCAmelCase : Optional[int] = 10
        _lowerCAmelCase : Dict = self.dummy_model()
        _lowerCAmelCase : Any = self.dummy_sample_deter
        scheduler.set_timesteps(__A )
        for i, t in enumerate(scheduler.prk_timesteps ):
            _lowerCAmelCase : List[str] = model(__A , __A )
            _lowerCAmelCase : Union[str, Any] = scheduler.step_prk(__A , __A , __A ).prev_sample
        for i, t in enumerate(scheduler.plms_timesteps ):
            _lowerCAmelCase : List[Any] = model(__A , __A )
            _lowerCAmelCase : List[Any] = scheduler.step_plms(__A , __A , __A ).prev_sample
        return sample
    def a ( self ):
        '''Check PRK/PLMS step outputs keep the input sample's shape.'''
        _lowerCAmelCase : Tuple = dict(self.forward_default_kwargs )
        _lowerCAmelCase : List[Any] = kwargs.pop('num_inference_steps' , __A )
        for scheduler_class in self.scheduler_classes:
            _lowerCAmelCase : int = self.get_scheduler_config()
            _lowerCAmelCase : Any = scheduler_class(**__A )
            _lowerCAmelCase : Optional[Any] = self.dummy_sample
            _lowerCAmelCase : Optional[int] = 0.1 * sample
            if num_inference_steps is not None and hasattr(__A , 'set_timesteps' ):
                scheduler.set_timesteps(__A )
            elif num_inference_steps is not None and not hasattr(__A , 'set_timesteps' ):
                _lowerCAmelCase : Tuple = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            _lowerCAmelCase : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            _lowerCAmelCase : Optional[Any] = dummy_past_residuals[:]
            _lowerCAmelCase : Any = scheduler.step_prk(__A , 0 , __A , **__A ).prev_sample
            _lowerCAmelCase : Dict = scheduler.step_prk(__A , 1 , __A , **__A ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_a.shape )
            _lowerCAmelCase : Dict = scheduler.step_plms(__A , 0 , __A , **__A ).prev_sample
            _lowerCAmelCase : Optional[Any] = scheduler.step_plms(__A , 1 , __A , **__A ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_a.shape )
    def a ( self ):
        '''Sweep num_train_timesteps configurations.'''
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=__A )
    def a ( self ):
        '''Sweep steps_offset and pin the expected timestep schedule for offset=1.'''
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=__A )
        _lowerCAmelCase : Optional[Any] = self.scheduler_classes[0]
        _lowerCAmelCase : Optional[int] = self.get_scheduler_config(steps_offset=1 )
        _lowerCAmelCase : str = scheduler_class(**__A )
        scheduler.set_timesteps(10 )
        assert torch.equal(
            scheduler.timesteps , torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
    def a ( self ):
        '''Sweep (beta_start, beta_end) pairs.'''
        for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ):
            self.check_over_configs(beta_start=__A , beta_end=__A )
    def a ( self ):
        '''Sweep beta schedules.'''
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=__A )
    def a ( self ):
        '''Sweep prediction types.'''
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=__A )
    def a ( self ):
        '''Sweep individual time steps in the forward check.'''
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=__A )
    def a ( self ):
        '''Sweep (time step, num_inference_steps) pairs in the forward check.'''
        for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
            self.check_over_forward(num_inference_steps=__A )
    def a ( self ):
        '''Regression check for the historical power-of-3 timestep bug.'''
        _lowerCAmelCase : List[Any] = 27
        for scheduler_class in self.scheduler_classes:
            _lowerCAmelCase : List[Any] = self.dummy_sample
            _lowerCAmelCase : Union[str, Any] = 0.1 * sample
            _lowerCAmelCase : Dict = self.get_scheduler_config()
            _lowerCAmelCase : Dict = scheduler_class(**__A )
            scheduler.set_timesteps(__A )
            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2] ):
                _lowerCAmelCase : Optional[int] = scheduler.step_prk(__A , __A , __A ).prev_sample
    def a ( self ):
        '''step_plms must raise when called before set_timesteps.'''
        with self.assertRaises(__A ):
            _lowerCAmelCase : Any = self.scheduler_classes[0]
            _lowerCAmelCase : Optional[int] = self.get_scheduler_config()
            _lowerCAmelCase : List[str] = scheduler_class(**__A )
            scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
    def a ( self ):
        '''Pin the full-loop result checksums for the default (epsilon) config.'''
        _lowerCAmelCase : int = self.full_loop()
        _lowerCAmelCase : Optional[Any] = torch.sum(torch.abs(__A ) )
        _lowerCAmelCase : Any = torch.mean(torch.abs(__A ) )
        assert abs(result_sum.item() - 198.1318 ) < 1E-2
        assert abs(result_mean.item() - 0.2580 ) < 1E-3
    def a ( self ):
        '''Pin the full-loop result checksums for v_prediction.'''
        _lowerCAmelCase : Dict = self.full_loop(prediction_type='v_prediction' )
        _lowerCAmelCase : List[Any] = torch.sum(torch.abs(__A ) )
        _lowerCAmelCase : Optional[Any] = torch.mean(torch.abs(__A ) )
        assert abs(result_sum.item() - 67.3986 ) < 1E-2
        assert abs(result_mean.item() - 0.0878 ) < 1E-3
    def a ( self ):
        '''Pin the full-loop checksums with set_alpha_to_one and beta_start=0.01.'''
        _lowerCAmelCase : List[str] = self.full_loop(set_alpha_to_one=__A , beta_start=0.01 )
        _lowerCAmelCase : Dict = torch.sum(torch.abs(__A ) )
        _lowerCAmelCase : str = torch.mean(torch.abs(__A ) )
        assert abs(result_sum.item() - 230.0399 ) < 1E-2
        assert abs(result_mean.item() - 0.2995 ) < 1E-3
    def a ( self ):
        '''Pin the full-loop checksums for the complementary set_alpha_to_one setting.'''
        _lowerCAmelCase : Union[str, Any] = self.full_loop(set_alpha_to_one=__A , beta_start=0.01 )
        _lowerCAmelCase : int = torch.sum(torch.abs(__A ) )
        _lowerCAmelCase : Dict = torch.mean(torch.abs(__A ) )
        assert abs(result_sum.item() - 186.9482 ) < 1E-2
        assert abs(result_mean.item() - 0.2434 ) < 1E-3
| 703 |
'''simple docstring'''
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 630 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
# Module-level logger (bound to an obfuscated name; conventionally ``logger``).
lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
class UpperCamelCase__ ( lowercase__ ):
    """Deprecated alias for ``BeitImageProcessor``; warns on construction and
    otherwise defers entirely to the parent image processor.

    NOTE(review): ``*snake_case__ , **snake_case__`` repeats the same
    parameter name, which Python rejects as a duplicate argument, and the
    second ``warnings.warn`` argument (originally the warning category,
    presumably ``FutureWarning``) was mangled to ``snake_case__`` — restore
    the names before use.
    """
    def __init__( self , *snake_case__ , **snake_case__ ):
        '''Emit the deprecation warning, then construct the parent image processor.'''
        warnings.warn(
            'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use BeitImageProcessor instead.' , snake_case__ , )
        super().__init__(*snake_case__ , **snake_case__ )
| 704 |
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# Module logger plus the standard tokenizer resource maps: vocab file name,
# per-checkpoint vocab URLs and per-checkpoint maximum input sizes.
# NOTE(review): all four constants are bound to variants of the same mangled
# name, and the class attributes below (VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES) are
# never defined under those names — restore before use.
lowerCAmelCase : int = logging.get_logger(__name__)
lowerCAmelCase : Union[str, Any] = {"""vocab_file""": """spiece.model"""}
lowerCAmelCase : Optional[int] = {
    """vocab_file""": {
        """AI-Sweden/gpt-sw3-126m""": """https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model""",
        """AI-Sweden/gpt-sw3-350m""": """https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model""",
        """AI-Sweden/gpt-sw3-1.6b""": """https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model""",
        """AI-Sweden/gpt-sw3-6.7b""": """https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model""",
        """AI-Sweden/gpt-sw3-20b""": """https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model""",
    }
}
lowerCAmelCase : Union[str, Any] = {
    """AI-Sweden/gpt-sw3-126m""": 20_48,
    """AI-Sweden/gpt-sw3-350m""": 20_48,
    """AI-Sweden/gpt-sw3-1.6b""": 20_48,
    """AI-Sweden/gpt-sw3-6.7b""": 20_48,
    """AI-Sweden/gpt-sw3-20b""": 20_48,
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """SentencePiece tokenizer for GPT-SW3 checkpoints: Unicode/whitespace
    normalization, encode/decode via ``spm.SentencePieceProcessor``, vocab
    saving, and chat-style conversation encoding.

    NOTE(review): this block is identifier-mangled and not runnable as written.
    ``__init__`` repeats the parameter name ``snake_case__`` many times (a
    SyntaxError), and bodies read names that are never bound — e.g.
    ``sp_model_kwargs``, ``name_or_path``, ``eos_token``, ``unk_token``,
    ``pad_token``, ``bos_token``, ``state``, ``d``, ``text``, ``tokens``,
    ``out_string``, ``vocab``, ``save_directory``, ``filename_prefix``,
    ``out_vocab_file``, ``return_tensors``, ``token_ids``, ``conversation`` —
    because every assignment targets ``_lowerCAmelCase``. Restore the names
    before use.
    """
    __magic_name__ = VOCAB_FILES_NAMES
    __magic_name__ = PRETRAINED_VOCAB_FILES_MAP
    __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __magic_name__ = ["input_ids", "attention_mask"]
    def __init__( self , snake_case__ , snake_case__=False , snake_case__=False , snake_case__=False , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__ = None , **snake_case__ , ):
        '''Load the SentencePiece model and pick version-dependent special-token defaults.'''
        _lowerCAmelCase : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
        _lowerCAmelCase : List[Any] = kwargs.get('name_or_path' )
        if name_or_path is None:
            logger.warning(
                'name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'
                ' you are testing the model, this can safely be ignored' )
            _lowerCAmelCase : Any = 'None'
        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        _lowerCAmelCase : str = '<|endoftext|>' if eos_token is None else eos_token
        _lowerCAmelCase : Tuple = '<unk>' if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            # The 7b checkpoint reuses unk/eos as pad/bos.
            _lowerCAmelCase : List[str] = unk_token if pad_token is None else pad_token
            _lowerCAmelCase : Optional[int] = eos_token if bos_token is None else bos_token
        else:
            _lowerCAmelCase : Tuple = '<pad>' if pad_token is None else pad_token
            _lowerCAmelCase : Union[str, Any] = '<s>' if bos_token is None else bos_token
        super().__init__(
            do_lower_case=snake_case__ , remove_space=snake_case__ , keep_accents=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , )
        _lowerCAmelCase : Union[str, Any] = do_lower_case
        _lowerCAmelCase : Optional[int] = remove_space
        _lowerCAmelCase : Any = keep_accents
        _lowerCAmelCase : Optional[int] = vocab_file
        _lowerCAmelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(snake_case__ )
        # Used for whitespace normalization in input texts
        # fmt : off
        _lowerCAmelCase : Optional[Any] = {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '', ''}
        # fmt : on
        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        _lowerCAmelCase : Optional[Any] = re.compile(
            F'[{"".join(map(snake_case__ , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]' )
    def __getstate__( self ):
        '''Drop the (unpicklable) SentencePiece processor when pickling.'''
        _lowerCAmelCase : List[str] = self.__dict__.copy()
        _lowerCAmelCase : int = None
        return state
    def __setstate__( self , snake_case__ ):
        '''Restore state and reload the SentencePiece model from the vocab file.'''
        _lowerCAmelCase : Any = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            _lowerCAmelCase : int = {}
        _lowerCAmelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def a ( self ):
        '''Size of the SentencePiece vocabulary.'''
        return len(self.sp_model )
    def a ( self , snake_case__ ):
        '''Strip non-printing characters, normalize whitespace variants, apply NFC.'''
        _lowerCAmelCase : Optional[int] = self.non_printing_characters_re.sub('' , snake_case__ )
        # Normalize whitespaces
        _lowerCAmelCase : Tuple = ''.join([char if char not in self.whitespaces else ' ' for char in text] )
        # NFC Unicode normalization
        _lowerCAmelCase : Union[str, Any] = unicodedata.normalize('NFC' , snake_case__ )
        return text
    def a ( self , snake_case__ , **snake_case__ ):
        '''Tokenize: preprocess the text, then encode with SentencePiece.'''
        _lowerCAmelCase : str = self.preprocess_text(snake_case__ )
        return self.sp_model.encode(snake_case__ , out_type=snake_case__ )
    def a ( self , snake_case__ ):
        '''Convert a token string to its vocabulary id.'''
        return self.sp_model.PieceToId(snake_case__ )
    def a ( self , snake_case__ ):
        '''Convert a vocabulary id back to its token string.'''
        return self.sp_model.IdToPiece(snake_case__ )
    @staticmethod
    def a ( snake_case__ ):
        '''Identity clean-up hook: returns the string unchanged.'''
        return out_string
    def a ( self , snake_case__ ):
        '''Join tokens into a string, decoding non-special runs via SentencePiece.'''
        _lowerCAmelCase : int = []
        _lowerCAmelCase : Optional[Any] = ''
        _lowerCAmelCase : Tuple = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(snake_case__ ) + token
                _lowerCAmelCase : Union[str, Any] = True
                _lowerCAmelCase : List[Any] = []
            else:
                current_sub_tokens.append(snake_case__ )
                _lowerCAmelCase : List[Any] = False
        out_string += self.sp_model.decode(snake_case__ )
        return out_string
    def a ( self ):
        '''Full vocabulary mapping (token -> id), including added tokens.'''
        _lowerCAmelCase : Union[str, Any] = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def a ( self , snake_case__ , snake_case__ = None ):
        '''Save the SentencePiece model file into a directory; returns the written path.'''
        if not os.path.isdir(snake_case__ ):
            # NOTE(review): returns None here instead of the documented tuple.
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        _lowerCAmelCase : int = os.path.join(
            snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , snake_case__ )
        elif not os.path.isfile(self.vocab_file ):
            # Fall back to serializing the in-memory model proto.
            with open(snake_case__ , 'wb' ) as fi:
                _lowerCAmelCase : Any = self.sp_model.serialized_model_proto()
                fi.write(snake_case__ )
        return (out_vocab_file,)
    def a ( self , snake_case__ , snake_case__ = False ):
        '''Fast encode for a string or list of strings; optionally returns a torch tensor.'''
        if isinstance(snake_case__ , snake_case__ ):
            _lowerCAmelCase : Optional[Any] = self.preprocess_text(snake_case__ )
            _lowerCAmelCase : List[str] = self.sp_model.encode(snake_case__ )
        else:
            _lowerCAmelCase : Tuple = [self.preprocess_text(snake_case__ ) for t in text]
            _lowerCAmelCase : List[str] = self.sp_model.encode(snake_case__ )
        if return_tensors is True or return_tensors == "pt":
            _lowerCAmelCase : int = torch.tensor(snake_case__ )
        return token_ids
    def a ( self , snake_case__ ):
        '''Fast decode of ids straight through SentencePiece.'''
        return self.sp_model.decode(snake_case__ )
    def a ( self , snake_case__ ):
        '''Serialize a chat conversation into the "User:/Bot:" prompt format and encode it.'''
        _lowerCAmelCase : Optional[int] = [F'User: {text}' if is_user else F'Bot: {text}' for is_user, text in conversation.iter_texts()]
        _lowerCAmelCase : str = (
            F'{self.eos_token}{self.bos_token}' + F'{self.bos_token}'.join(snake_case__ ) + F'{self.bos_token}Bot:'
        )
        return self.encode(text=snake_case__ )
| 630 | 0 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def lowercase (_A ):
    """Convert a PIL image to a normalized NCHW float tensor in [-1, 1].

    The image is resized so both sides are integer multiples of 32 (rounding
    down), scaled from [0, 255] to [0, 1], given a batch axis, transposed from
    NHWC to NCHW, and finally mapped to [-1, 1].

    Args:
        _A: input image. NOTE(review): assumed to be a 3-channel PIL.Image
            (H, W, 3) — confirm with callers.

    Returns:
        ``torch.FloatTensor`` of shape (1, 3, h, w) with values in [-1, 1].
    """
    w, h = _A.size
    # Round each side down to the nearest multiple of 32 (UNet downsampling
    # requires dimensions divisible by 32).
    w, h = (x - x % 3_2 for x in (w, h))
    resized = _A.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] )
    # uint8 [0, 255] -> float32 [0, 1], then HWC -> NCHW with a batch axis.
    arr = np.array(resized ).astype(np.float32 ) / 255.0
    arr = arr[None].transpose(0 , 3 , 1 , 2 )
    tensor = torch.from_numpy(arr )
    # Map [0, 1] to [-1, 1].
    return 2.0 * tensor - 1.0
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
    """Latent-diffusion super-resolution pipeline: VQ-VAE + UNet + scheduler.

    NOTE(review): this block is identifier-mangled and not runnable as written.
    ``__init__``/``__call__`` repeat the parameter name ``snake_case__`` (a
    SyntaxError), bodies read the undefined name ``_a`` in place of many
    distinct originals (image, batch_size, generator, eta, ...), several tuple
    unpackings were collapsed to single assignments (e.g. ``height, width =
    image.shape[-2:]``), and reads such as ``image``, ``latents``,
    ``accepts_eta``, ``output_type``, ``return_dict`` are never bound because
    assignments target ``_lowerCAmelCase``. Restore the names before use.
    """
    def __init__( self , snake_case__ , snake_case__ , snake_case__ , ):
        '''Register the VQ-VAE, UNet and scheduler modules with the pipeline.'''
        super().__init__()
        self.register_modules(vqvae=_a , unet=_a , scheduler=_a )
    @torch.no_grad()
    def __call__( self , snake_case__ = None , snake_case__ = 1 , snake_case__ = 100 , snake_case__ = 0.0 , snake_case__ = None , snake_case__ = "pil" , snake_case__ = True , ):
        '''Run the denoising loop on a low-res image and decode the upscaled result.'''
        if isinstance(_a , PIL.Image.Image ):
            _lowerCAmelCase : Any = 1
        elif isinstance(_a , torch.Tensor ):
            _lowerCAmelCase : Optional[int] = image.shape[0]
        else:
            raise ValueError(F'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(_a )}' )
        if isinstance(_a , PIL.Image.Image ):
            _lowerCAmelCase : str = preprocess(_a )
        _lowerCAmelCase : Any = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        _lowerCAmelCase : Dict = (batch_size, self.unet.config.in_channels // 2, height, width)
        _lowerCAmelCase : Optional[int] = next(self.unet.parameters() ).dtype
        _lowerCAmelCase : int = randn_tensor(_a , generator=_a , device=self.device , dtype=_a )
        _lowerCAmelCase : List[Any] = image.to(device=self.device , dtype=_a )
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(_a , device=self.device )
        _lowerCAmelCase : Optional[int] = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        _lowerCAmelCase : Optional[Any] = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        _lowerCAmelCase : Union[str, Any] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        _lowerCAmelCase : List[Any] = {}
        if accepts_eta:
            _lowerCAmelCase : Union[str, Any] = eta
        for t in self.progress_bar(_a ):
            # concat latents and low resolution image in the channel dimension.
            _lowerCAmelCase : List[str] = torch.cat([latents, image] , dim=1 )
            _lowerCAmelCase : Optional[Any] = self.scheduler.scale_model_input(_a , _a )
            # predict the noise residual
            _lowerCAmelCase : Union[str, Any] = self.unet(_a , _a ).sample
            # compute the previous noisy sample x_t -> x_t-1
            _lowerCAmelCase : Union[str, Any] = self.scheduler.step(_a , _a , _a , **_a ).prev_sample
        # decode the image latents with the VQVAE
        _lowerCAmelCase : Any = self.vqvae.decode(_a ).sample
        _lowerCAmelCase : Tuple = torch.clamp(_a , -1.0 , 1.0 )
        _lowerCAmelCase : int = image / 2 + 0.5
        _lowerCAmelCase : Any = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            _lowerCAmelCase : List[Any] = self.numpy_to_pil(_a )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=_a )
| 705 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """Test-suite for ``DDPMScheduler`` (config sweeps, variance values, full
    denoising loops and custom timestep schedules).

    NOTE(review): this class continues past the end of the visible chunk (its
    final method is truncated). Like the rest of the file it is
    identifier-mangled and not runnable as written: bodies read names that are
    never bound — ``config``, ``snake_case__`` inside no-arg methods,
    ``scheduler``, ``num_trained_timesteps``, ``model``, ``sample``,
    ``pred_prev_sample``, ``result_sum``, ``result_mean``, ``timesteps``,
    ``prev_t`` — because every assignment targets ``_lowerCAmelCase``.
    Restore the names before running.
    """
    __magic_name__ = (DDPMScheduler,)
    def a ( self , **snake_case__ ):
        '''Return a default DDPM scheduler config, overridable via keyword arguments.'''
        _lowerCAmelCase : Union[str, Any] = {
            'num_train_timesteps': 1000,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'variance_type': 'fixed_small',
            'clip_sample': True,
        }
        config.update(**snake_case__ )
        return config
    def a ( self ):
        '''Sweep num_train_timesteps configurations.'''
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=snake_case__ )
    def a ( self ):
        '''Sweep (beta_start, beta_end) pairs.'''
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=snake_case__ , beta_end=snake_case__ )
    def a ( self ):
        '''Sweep beta schedules.'''
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=snake_case__ )
    def a ( self ):
        '''Sweep variance types (including an invalid "other" value).'''
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=snake_case__ )
    def a ( self ):
        '''Sweep clip_sample on/off.'''
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=snake_case__ )
    def a ( self ):
        '''Sweep thresholding with each threshold/prediction-type combination.'''
        self.check_over_configs(thresholding=snake_case__ )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=snake_case__ , prediction_type=snake_case__ , sample_max_value=snake_case__ , )
    def a ( self ):
        '''Sweep prediction types.'''
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=snake_case__ )
    def a ( self ):
        '''Sweep individual time steps in the forward check.'''
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=snake_case__ )
    def a ( self ):
        '''Pin expected variance values at timesteps 0, 487 and 999.'''
        _lowerCAmelCase : List[Any] = self.scheduler_classes[0]
        _lowerCAmelCase : Optional[Any] = self.get_scheduler_config()
        _lowerCAmelCase : List[Any] = scheduler_class(**snake_case__ )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
    def a ( self ):
        '''Full reverse-diffusion loop (epsilon prediction); pin result checksums.'''
        _lowerCAmelCase : Optional[Any] = self.scheduler_classes[0]
        _lowerCAmelCase : Optional[Any] = self.get_scheduler_config()
        _lowerCAmelCase : List[Any] = scheduler_class(**snake_case__ )
        _lowerCAmelCase : Optional[int] = len(snake_case__ )
        _lowerCAmelCase : str = self.dummy_model()
        _lowerCAmelCase : str = self.dummy_sample_deter
        _lowerCAmelCase : Union[str, Any] = torch.manual_seed(0 )
        for t in reversed(range(snake_case__ ) ):
            # 1. predict noise residual
            _lowerCAmelCase : List[Any] = model(snake_case__ , snake_case__ )
            # 2. predict previous mean of sample x_t-1
            _lowerCAmelCase : Any = scheduler.step(snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ ).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            _lowerCAmelCase : Dict = pred_prev_sample
        _lowerCAmelCase : Dict = torch.sum(torch.abs(snake_case__ ) )
        _lowerCAmelCase : List[str] = torch.mean(torch.abs(snake_case__ ) )
        assert abs(result_sum.item() - 258.9606 ) < 1E-2
        assert abs(result_mean.item() - 0.3372 ) < 1E-3
    def a ( self ):
        '''Full reverse-diffusion loop with v_prediction; pin result checksums.'''
        _lowerCAmelCase : int = self.scheduler_classes[0]
        _lowerCAmelCase : Any = self.get_scheduler_config(prediction_type='v_prediction' )
        _lowerCAmelCase : Union[str, Any] = scheduler_class(**snake_case__ )
        _lowerCAmelCase : Optional[int] = len(snake_case__ )
        _lowerCAmelCase : Any = self.dummy_model()
        _lowerCAmelCase : Tuple = self.dummy_sample_deter
        _lowerCAmelCase : Optional[int] = torch.manual_seed(0 )
        for t in reversed(range(snake_case__ ) ):
            # 1. predict noise residual
            _lowerCAmelCase : Union[str, Any] = model(snake_case__ , snake_case__ )
            # 2. predict previous mean of sample x_t-1
            _lowerCAmelCase : Dict = scheduler.step(snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ ).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            _lowerCAmelCase : Tuple = pred_prev_sample
        _lowerCAmelCase : Any = torch.sum(torch.abs(snake_case__ ) )
        _lowerCAmelCase : Dict = torch.mean(torch.abs(snake_case__ ) )
        assert abs(result_sum.item() - 202.0296 ) < 1E-2
        assert abs(result_mean.item() - 0.2631 ) < 1E-3
    def a ( self ):
        '''Custom timestep schedule: previous_timestep must return the next entry (or -1 at the end).'''
        _lowerCAmelCase : List[Any] = self.scheduler_classes[0]
        _lowerCAmelCase : Optional[int] = self.get_scheduler_config()
        _lowerCAmelCase : Union[str, Any] = scheduler_class(**snake_case__ )
        _lowerCAmelCase : Union[str, Any] = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=snake_case__ )
        _lowerCAmelCase : Union[str, Any] = scheduler.timesteps
        for i, timestep in enumerate(snake_case__ ):
            if i == len(snake_case__ ) - 1:
                _lowerCAmelCase : str = -1
            else:
                _lowerCAmelCase : Optional[Any] = timesteps[i + 1]
            _lowerCAmelCase : int = scheduler.previous_timestep(snake_case__ )
            _lowerCAmelCase : int = prev_t.item()
            self.assertEqual(snake_case__ , snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.scheduler_classes[0]
_lowerCAmelCase : Tuple = self.get_scheduler_config()
_lowerCAmelCase : List[Any] = scheduler_class(**snake_case__ )
_lowerCAmelCase : Optional[int] = [100, 87, 50, 51, 0]
with self.assertRaises(snake_case__ , msg='`custom_timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.scheduler_classes[0]
_lowerCAmelCase : List[str] = self.get_scheduler_config()
_lowerCAmelCase : Union[str, Any] = scheduler_class(**snake_case__ )
_lowerCAmelCase : Optional[int] = [100, 87, 50, 1, 0]
_lowerCAmelCase : int = len(snake_case__ )
with self.assertRaises(snake_case__ , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=snake_case__ , timesteps=snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.scheduler_classes[0]
_lowerCAmelCase : int = self.get_scheduler_config()
_lowerCAmelCase : Any = scheduler_class(**snake_case__ )
_lowerCAmelCase : Any = [scheduler.config.num_train_timesteps]
with self.assertRaises(
snake_case__ , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ):
scheduler.set_timesteps(timesteps=snake_case__ )
| 630 | 0 |
'''simple docstring'''
import argparse
from collections import defaultdict
def lowercase (_A , _A , _A , _A , _A ):
"""simple docstring"""
_lowerCAmelCase : Optional[int] = f'{file}_{class_name}_{test_name}'
done_test[_id] += 1
with open(snake_case__ , 'r' ) as f:
_lowerCAmelCase : Union[str, Any] = f.readlines()
_lowerCAmelCase : int = f'class {class_name}('
_lowerCAmelCase : Optional[int] = f'{4 * " "}def {test_name}('
_lowerCAmelCase : List[Any] = f'{8 * " "}{correct_line.split()[0]}'
_lowerCAmelCase : Optional[Any] = f'{1_6 * " "}{correct_line.split()[0]}'
_lowerCAmelCase : Union[str, Any] = False
_lowerCAmelCase : List[Any] = False
_lowerCAmelCase : Optional[Any] = False
_lowerCAmelCase : Union[str, Any] = False
_lowerCAmelCase : Dict = 0
_lowerCAmelCase : Optional[int] = 0
_lowerCAmelCase : List[str] = []
for line in lines:
if line.startswith(snake_case__ ):
_lowerCAmelCase : Optional[Any] = True
elif in_class and line.startswith(snake_case__ ):
_lowerCAmelCase : List[Any] = True
elif in_class and in_func and (line.startswith(snake_case__ ) or line.startswith(snake_case__ )):
_lowerCAmelCase : List[Any] = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
_lowerCAmelCase : Union[str, Any] = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
_lowerCAmelCase : Optional[int] = True
if in_class and in_func and in_line and insert_line:
new_lines.append(f'{spaces * " "}{correct_line}' )
_lowerCAmelCase : Optional[Any] = False
else:
new_lines.append(snake_case__ )
with open(snake_case__ , 'w' ) as f:
for line in new_lines:
f.write(snake_case__ )
def main(correct, fail=None):
    """Apply the corrections listed in file `correct` (one `file;class;test;line`
    record per line), optionally restricted to the tests listed in `fail`.

    Fixes: the original signature declared both parameters as `_A` (a
    SyntaxError) and the body read never-assigned names; the script's
    `__main__` block calls `main`, so that name is restored.
    """
    if fail is not None:
        with open(fail, 'r') as f:
            # One failed test id per line, formatted as file::class::test.
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct, 'r') as f:
        correct_lines = f.readlines()
    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(';')
        # With no failure list, patch everything; otherwise only failing tests.
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
if __name__ == "__main__":
    # Fix: the parser/args objects were bound to throwaway names while the
    # lines below read `parser` and `args`.
    parser = argparse.ArgumentParser()
    parser.add_argument("--correct_filename", help="filename of tests with expected result")
    parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
    args = parser.parse_args()

    main(args.correct_filename, args.fail_filename)
| 706 |
'''simple docstring'''
import socket
def main():
    """Connect to a file server on this host (port 12312) and stream its
    response into a local file named `Received_file`.

    Fixes: the received chunk was written via an undefined name `_A`
    (now `data`), and the function is renamed to `main` because the
    `__main__` guard calls `main()`.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312
    sock.connect((host, port))
    sock.send(b'Hello server!')
    with open('Received_file', 'wb') as out_file:
        print('File opened')
        print('Receiving data...')
        while True:
            data = sock.recv(1024)
            # Empty recv means the server closed the connection.
            if not data:
                break
            out_file.write(data)
    print('Successfully received the file')
    sock.close()
    print('Connection closed')


if __name__ == "__main__":
    main()
| 630 | 0 |
'''simple docstring'''
def prime_sieve_eratosthenes(num):
    """Return all primes <= `num` using the sieve of Eratosthenes.

    Fixes: the original body read `num` although the parameter was named
    `_A`, the sieve step used an undefined name, and the `__main__` guard
    called `prime_sieve_eratosthenes` / read `user_num` — both undefined.

    Args:
        num: upper bound (inclusive); must be a positive integer.

    Raises:
        ValueError: if `num` <= 0.
    """
    if num <= 0:
        raise ValueError('Input must be a positive integer')
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            # Mark every multiple of p starting at p*p as composite.
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_num = int(input("""Enter a positive integer: """).strip())
    print(prime_sieve_eratosthenes(user_num))
| 707 |
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
# Flags selecting which conversion passes run when this script is executed.
# Fix: all three flags (and every variable below) were bound to throwaway
# names while the code read the original identifiers.
do_only_config = False
do_only_weights = True
do_only_renaming = False


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--repo_path",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()

    # Old config key -> new config key.
    config_parameters_to_change = {
        "image_size": "sample_size",
        "num_res_blocks": "layers_per_block",
        "block_channels": "block_out_channels",
        "down_blocks": "down_block_types",
        "up_blocks": "up_block_types",
        "downscale_freq_shift": "freq_shift",
        "resnet_num_groups": "norm_num_groups",
        "resnet_act_fn": "act_fn",
        "resnet_eps": "norm_eps",
        "num_head_channels": "attention_head_dim",
    }

    # Old state-dict key prefix -> new top-level module name.
    key_parameters_to_change = {
        "time_steps": "time_proj",
        "mid": "mid_block",
        "downsample_blocks": "down_blocks",
        "upsample_blocks": "up_blocks",
    }

    # A repo with config.json at its root is a plain UNet checkpoint;
    # otherwise the UNet lives in the "unet" subfolder of a pipeline repo.
    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNetaDModel(**config)
    else:
        class_name = UNetaDConditionModel if "ldm-text2im-large-256" in args.repo_path else UNetaDModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

    # Strip the legacy "UNetRes" prefix from block type names.
    config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
    config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))
        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            # Drop legacy downsampler op parameters entirely.
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

    model.load_state_dict(new_state_dict)
    model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 630 | 0 |
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
lowerCAmelCase : List[Any] = logging.get_logger(__name__)
lowerCAmelCase : Tuple = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
lowerCAmelCase : str = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
lowerCAmelCase : List[str] = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
lowerCAmelCase : List[Any] = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
lowerCAmelCase : Dict = {
"facebook/dpr-ctx_encoder-single-nq-base": 5_12,
"facebook/dpr-ctx_encoder-multiset-base": 5_12,
}
lowerCAmelCase : Tuple = {
"facebook/dpr-question_encoder-single-nq-base": 5_12,
"facebook/dpr-question_encoder-multiset-base": 5_12,
}
lowerCAmelCase : int = {
"facebook/dpr-reader-single-nq-base": 5_12,
"facebook/dpr-reader-multiset-base": 5_12,
}
lowerCAmelCase : Any = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
lowerCAmelCase : List[str] = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
lowerCAmelCase : Any = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class UpperCamelCase__ ( _UpperCAmelCase ):
    """BERT-style tokenizer for the DPR context encoder, wired to the
    context-encoder vocab/config maps defined above.

    Fix: all four class attributes were bound to the same name
    `__magic_name__`, so only the last assignment survived; the standard
    tokenizer attribute names are restored.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class UpperCamelCase__ ( _UpperCAmelCase ):
    """BERT-style tokenizer for the DPR question encoder, wired to the
    question-encoder vocab/config maps defined above.

    Fix: all four class attributes were bound to the same name
    `__magic_name__`, so only the last assignment survived; the standard
    tokenizer attribute names are restored.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
# Fix: these namedtuples were bound to throwaway names while the reader mixin
# below constructs `DPRSpanPrediction` instances and annotates with
# `DPRReaderOutput`.
DPRSpanPrediction = collections.namedtuple(
    """DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
DPRReaderOutput = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
lowerCAmelCase : Union[str, Any] = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. 
Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. 
If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(_UpperCAmelCase )
class UpperCamelCase__ :
    """Mixin adding DPR-reader-specific encoding and span decoding on top of a
    BERT-style tokenizer (expects `pad_token_id`, `sep_token_id`, `pad` and
    `decode` from the tokenizer it is mixed into).

    Fixes: `__call__` and both helper methods declared duplicate parameter
    names (a SyntaxError), both helpers were named `a` (the second shadowed
    the first), and bodies read undefined placeholder names. The method
    names used by in-class callers (`_get_best_spans`) are restored.
    """

    def __call__(
        self,
        questions,
        titles=None,
        texts=None,
        padding=False,
        truncation=False,
        max_length=None,
        return_tensors=None,
        return_attention_mask=None,
        **kwargs,
    ):
        """Encode (question, title, text) triples as `[CLS] q [SEP] title [SEP] text`.

        With neither titles nor texts, behaves like the plain tokenizer; with
        only one of them, encodes it as a text pair.
        """
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        # A single question is duplicated across all passages.
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f'There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts.'
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)['input_ids']
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)['input_ids']
        encoded_inputs = {
            'input_ids': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs['input_ids']:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs['attention_mask'] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(self, reader_input, reader_output, num_spans=16, max_answer_length=64, num_spans_per_passage=4):
        """Return up to `num_spans` best answer spans over the passages,
        iterating passages in decreasing relevance order and taking at most
        `num_spans_per_passage` spans from each.
        """
        input_ids = reader_input['input_ids']
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(self, start_logits, end_logits, max_answer_length, top_spans):
        """Return the `top_spans` highest-scoring non-overlapping (start, end)
        index pairs, scored by start_logit + end_logit, spans limited to
        `max_answer_length` tokens.
        """
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda score: score[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f'Wrong span indices: [{start_index}:{end_index}]')
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f'Span is too long: {length} > {max_answer_length}')
            # Skip spans overlapping an already-chosen interval.
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(_UpperCAmelCase )
class UpperCamelCase__ ( _UpperCAmelCase , _UpperCAmelCase ):
    """DPR reader tokenizer: the custom reader mixin combined with a BERT-style
    tokenizer, wired to the reader vocab/config maps defined above.

    Fix: all five class attributes were bound to the same name
    `__magic_name__`, so only the last assignment survived; the standard
    tokenizer attribute names are restored.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
| 708 |
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class UpperCamelCase__ ( tf.keras.layers.Layer ):
    """In-graph GPT-2 BPE tokenizer implemented as a Keras layer.

    Fixes: every method declared duplicate `snake_case__` parameters (a
    SyntaxError), four methods were all named `a` (only the last survived),
    and bodies read undefined names. The Keras-required names (`call`,
    `get_config`, `from_config`) and the constructor aliases
    (`from_tokenizer`, `from_pretrained`) are restored.
    """

    def __init__(self, vocab, merges, max_length=None, pad_token_id=None):
        """Build the in-graph tokenizer.

        Args:
            vocab: token -> id mapping.
            merges: BPE merge rules, one "a b" string per merge.
            max_length: optional fixed sequence length for padding/truncation.
            pad_token_id: id used when padding up to max_length.
        """
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer, *args, **kwargs):
        """Build from an existing `GPTaTokenizer` instance."""
        merges = [' '.join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *init_inputs, **kwargs):
        """Build from a pretrained checkpoint via `GPTaTokenizer.from_pretrained`."""
        tokenizer = GPTaTokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        """Recreate the layer from a `get_config` dictionary (Keras protocol)."""
        return cls(**config)

    def get_config(self):
        """Return the constructor arguments for Keras serialization."""
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length=None):
        """Tokenize a batch of strings; returns input_ids and attention_mask tensors."""
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)
        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length
            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )
        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 630 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
lowerCAmelCase : int = {
'''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''',
}
class UpperCamelCase__ ( a__ ):
    """Configuration class holding the hyperparameters of an EfficientNet model.

    Fixes: the constructor declared 21 parameters all named `snake_case__`
    (a SyntaxError) while the body read the real hyperparameter names; the
    parameter names are reconstructed from those reads and their positional
    defaults. `model_type` replaces a mangled attribute name.

    NOTE(review): mutable list defaults are kept as-is to preserve the
    published signature, although shared-default semantics are a known
    Python hazard.
    """

    model_type = "efficientnet"

    def __init__(
        self,
        num_channels=3,
        image_size=600,
        width_coefficient=2.0,
        depth_coefficient=3.1,
        depth_divisor=8,
        kernel_sizes=[3, 3, 5, 3, 5, 5, 3],
        in_channels=[32, 16, 24, 40, 80, 112, 192],
        out_channels=[16, 24, 40, 80, 112, 192, 320],
        depthwise_padding=[],
        strides=[1, 2, 2, 2, 1, 2, 1],
        num_block_repeats=[1, 2, 2, 3, 3, 4, 1],
        expand_ratios=[1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio=0.25,
        hidden_act="swish",
        hidden_dim=2560,
        pooling_type="mean",
        initializer_range=0.02,
        batch_norm_eps=0.001,
        batch_norm_momentum=0.99,
        dropout_rate=0.5,
        drop_connect_rate=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        # Each block expands to four layers in the flattened architecture.
        self.num_hidden_layers = sum(num_block_repeats) * 4
class UpperCamelCase__ ( a__ ):
    """ONNX export configuration for EfficientNet.

    Fix: both properties were named `a` (the second shadowed the first);
    the OnnxConfig contract names `inputs` and `atol_for_validation` are
    restored, and the minimum-torch-version attribute name is de-mangled.
    """

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        """Dynamic-axis description of the single `pixel_values` input."""
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ]
        )

    @property
    def atol_for_validation(self):
        """Absolute tolerance used when validating the exported model."""
        return 1e-5
| 709 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Fix: the import structure was bound to a throwaway name while the
# _LazyModule call below reads `_import_structure`, and the torch-only
# model list was never inserted into it.
_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 630 | 0 |
'''simple docstring'''
# Value/symbol pairs in descending order, including the subtractive forms.
# Fix: this table was bound to a throwaway name while int_to_roman reads
# `ROMAN`.
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]
def roman_to_int(roman):
    """Convert a Roman numeral string to its integer value.

    Fixes: the parameter was named `_A` while the body read `roman`, and the
    loop bound referenced an undefined placeholder name.
    """
    vals = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
    total = 0
    place = 0
    while place < len(roman):
        # A smaller value before a larger one is subtractive (e.g. IV = 4).
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total
def int_to_roman(number):
    """Convert a positive integer to its Roman numeral string using the
    module-level `ROMAN` value table.

    Fixes: the parameter was named `_A` while the body read the original
    names, and the divmod targets were mangled.
    """
    result = []
    for arabic, roman in ROMAN:
        # factor = how many times this symbol repeats; keep the remainder.
        factor, number = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 710 |
'''simple docstring'''
# Precomputed digit-square sums for every value 0..99999, so next_number can
# consume five digits per iteration instead of one.
# Fix: this table was bound to a throwaway name while next_number reads
# `DIGITS_SQUARED`.
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]


def next_number(number):
    """Return the sum of the squares of the decimal digits of `number`.

    Fixes: the parameter was named `_A` while the body read `number`, and the
    accumulator was bound to a throwaway name.
    """
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000
    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
lowerCAmelCase : list[bool | None] = [None] * 10_00_00_00
lowerCAmelCase : List[str] = True
lowerCAmelCase : Union[str, Any] = False
def chain(number):
    """Return True when `number`'s digit-square chain ends at 1 (False when it
    ends at 89), memoizing results in the module-level `CHAINS` cache.

    Fixes: the function was not named `chain` although it recurses via that
    name, its parameter was `_A`, and the cache writes were bound to
    throwaway names.
    """
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain
    # Multiples of 10 share the same digit-square chain, so cache those too.
    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def solution(number=10000000):
    """Count how many starting values below `number` produce a digit-square
    chain arriving at 89 (Project Euler 92).

    Fixes: the function is called as `solution` by the __main__ guard, the
    parameter was `_A`, and the count argument was an undefined placeholder —
    False entries in CHAINS mark 89-terminating chains.
    """
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    return CHAINS[:number].count(False)
if __name__ == "__main__":
    # Run doctests, then print the Project Euler 92 answer.
    # NOTE(review): `solution` is defined above under a mangled name —
    # confirm the function is actually named `solution` before running.
    import doctest
    doctest.testmod()
    print(F'''{solution() = }''')
| 630 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
# Make torch/cuDNN kernels deterministic so pipeline outputs below are reproducible.
enable_full_determinism()
def lowercase (_A ):
    """Return True when every tensor in the sequence ``_A`` has the same shape.

    An empty or single-element sequence is trivially uniform (the generator
    below is empty, so ``all`` returns True without touching ``shapes[0]``).
    """
    shapes = [tensor.shape for tensor in _A]
    # Compare every remaining shape against the first one.
    return all(shape == shapes[0] for shape in shapes[1:] )
class UpperCamelCase__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
    """Fast (tiny-model, CPU) test suite for StableDiffusionLatentUpscalePipeline.

    NOTE(review): the identifiers look machine-mangled — every class attribute
    is assigned to the same name ``__magic_name__`` (each assignment clobbers
    the previous one), ``get_dummy_inputs``-style methods declare the
    parameter ``snake_case__`` twice (a SyntaxError), and method bodies read
    ``UpperCAmelCase_`` and other names that are never bound.  The comments
    below document the apparent intent; the names must be reconciled before
    this suite can execute.
    """
    __magic_name__ = StableDiffusionLatentUpscalePipeline
    __magic_name__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        """height""",
        """width""",
        """cross_attention_kwargs""",
        """negative_prompt_embeds""",
        """prompt_embeds""",
    }
    __magic_name__ = PipelineTesterMixin.required_optional_params - {"""num_images_per_prompt"""}
    __magic_name__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    __magic_name__ = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    __magic_name__ = frozenset([] )
    __magic_name__ = True
    @property
    def a ( self ):
        """Deterministic random 1x4x16x16 latent tensor used as pipeline input."""
        _lowerCAmelCase : str = 1
        _lowerCAmelCase : Optional[int] = 4
        _lowerCAmelCase : List[str] = (16, 16)
        # assumes batch_size/num_channels/sizes are the three values above — TODO confirm once de-mangled
        _lowerCAmelCase : List[Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCAmelCase_ )
        return image
    def a ( self ):
        """Build tiny UNet/VAE/scheduler/text-encoder components for fast tests."""
        torch.manual_seed(0 )
        _lowerCAmelCase : Any = UNetaDConditionModel(
            act_fn='gelu' , attention_head_dim=8 , norm_num_groups=UpperCAmelCase_ , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=160 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
                'KDownBlock2D',
                'KCrossAttnDownBlock2D',
                'KCrossAttnDownBlock2D',
                'KCrossAttnDownBlock2D',
            ) , in_channels=8 , mid_block_type=UpperCAmelCase_ , only_cross_attention=UpperCAmelCase_ , out_channels=5 , resnet_time_scale_shift='scale_shift' , time_embedding_type='fourier' , timestep_post_act='gelu' , up_block_types=('KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KUpBlock2D') , )
        _lowerCAmelCase : int = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
                'DownEncoderBlock2D',
                'DownEncoderBlock2D',
                'DownEncoderBlock2D',
                'DownEncoderBlock2D',
            ] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        _lowerCAmelCase : Union[str, Any] = EulerDiscreteScheduler(prediction_type='sample' )
        _lowerCAmelCase : Optional[int] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='quick_gelu' , projection_dim=512 , )
        _lowerCAmelCase : Dict = CLIPTextModel(UpperCAmelCase_ )
        _lowerCAmelCase : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        # assumes model/vae/scheduler/text_encoder/tokenizer refer to the objects built above — TODO confirm
        _lowerCAmelCase : List[Any] = {
            'unet': model.eval(),
            'vae': vae.eval(),
            'scheduler': scheduler,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
        }
        return components
    def a ( self , snake_case__ , snake_case__=0 ):
        """Standard call kwargs (prompt, image, generator, steps) for the pipeline."""
        if str(UpperCAmelCase_ ).startswith('mps' ):
            # MPS does not support device-bound generators; fall back to global seeding.
            _lowerCAmelCase : Any = torch.manual_seed(UpperCAmelCase_ )
        else:
            _lowerCAmelCase : str = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
        _lowerCAmelCase : List[str] = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': self.dummy_image.cpu(),
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
    def a ( self ):
        """End-to-end CPU inference: output shape and a 3x3 corner slice vs. reference."""
        _lowerCAmelCase : List[Any] = 'cpu'
        _lowerCAmelCase : Optional[int] = self.get_dummy_components()
        _lowerCAmelCase : Tuple = self.pipeline_class(**UpperCAmelCase_ )
        pipe.to(UpperCAmelCase_ )
        pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
        _lowerCAmelCase : Dict = self.get_dummy_inputs(UpperCAmelCase_ )
        _lowerCAmelCase : str = pipe(**UpperCAmelCase_ ).images
        _lowerCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 256, 256, 3) )
        _lowerCAmelCase : Union[str, Any] = np.array(
            [0.4722_2412, 0.4192_1633, 0.4471_7434, 0.4687_4192, 0.4258_8258, 0.4615_0726, 0.467_7534, 0.4558_3832, 0.4857_9055] )
        _lowerCAmelCase : Union[str, Any] = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(UpperCAmelCase_ , 1E-3 )
    def a ( self ):
        """Delegate to the mixin with a looser tolerance for this pipeline."""
        super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 )
    def a ( self ):
        """Delegate to the mixin with a looser tolerance for this pipeline."""
        super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 )
    def a ( self ):
        """Delegate to the mixin with a looser tolerance for this pipeline."""
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
    def a ( self ):
        """Delegate to the mixin with a looser tolerance for this pipeline."""
        super().test_inference_batch_single_identical(expected_max_diff=7E-3 )
    def a ( self ):
        """Delegate to the mixin with a looser tolerance for this pipeline."""
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 )
    def a ( self ):
        """Delegate to the mixin with a looser tolerance for this pipeline."""
        super().test_save_load_local(expected_max_difference=3E-3 )
    def a ( self ):
        """Delegate to the mixin with a looser tolerance for this pipeline."""
        super().test_save_load_optional_components(expected_max_difference=3E-3 )
    def a ( self ):
        """Run the pipeline with every supported Karras scheduler and check
        that all produced images share one shape (unsupported ones skipped)."""
        _lowerCAmelCase : List[Any] = [
            'DDIMScheduler',
            'DDPMScheduler',
            'PNDMScheduler',
            'HeunDiscreteScheduler',
            'EulerAncestralDiscreteScheduler',
            'KDPM2DiscreteScheduler',
            'KDPM2AncestralDiscreteScheduler',
            'DPMSolverSDEScheduler',
        ]
        _lowerCAmelCase : List[str] = self.get_dummy_components()
        _lowerCAmelCase : Union[str, Any] = self.pipeline_class(**UpperCAmelCase_ )
        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=UpperCAmelCase_ )
        pipe.to(UpperCAmelCase_ )
        pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
        _lowerCAmelCase : Optional[int] = self.get_dummy_inputs(UpperCAmelCase_ )
        _lowerCAmelCase : Dict = 2
        _lowerCAmelCase : List[Any] = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                # no schedulers
                continue
            _lowerCAmelCase : Optional[int] = getattr(UpperCAmelCase_ , scheduler_enum.name )
            _lowerCAmelCase : List[str] = scheduler_cls.from_config(pipe.scheduler.config )
            _lowerCAmelCase : int = pipe(**UpperCAmelCase_ )[0]
            outputs.append(UpperCAmelCase_ )
        assert check_same_shape(UpperCAmelCase_ )
@require_torch_gpu
@slow
class UpperCamelCase__ ( unittest.TestCase ):
    """Slow GPU integration tests for the x2 latent upscaler against reference images.

    NOTE(review): the first method calls ``super().tearDown()`` but is named
    ``a`` (it will never run as a tearDown hook), and the test bodies read
    names like ``pipe``/``upscaler``/``generator`` that are only ever assigned
    to ``_lowerCAmelCase`` — the identifiers need reconciling.
    """
    def a ( self ):
        """Free GPU memory between tests (intended as tearDown)."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def a ( self ):
        """SD v1-4 latent output upscaled to 1024px, compared to a stored reference."""
        _lowerCAmelCase : int = torch.manual_seed(33 )
        _lowerCAmelCase : Tuple = StableDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' , torch_dtype=torch.floataa )
        pipe.to('cuda' )
        _lowerCAmelCase : Tuple = StableDiffusionLatentUpscalePipeline.from_pretrained(
            'stabilityai/sd-x2-latent-upscaler' , torch_dtype=torch.floataa )
        upscaler.to('cuda' )
        _lowerCAmelCase : Union[str, Any] = 'a photo of an astronaut high resolution, unreal engine, ultra realistic'
        _lowerCAmelCase : str = pipe(UpperCAmelCase_ , generator=UpperCAmelCase_ , output_type='latent' ).images
        _lowerCAmelCase : int = upscaler(
            prompt=UpperCAmelCase_ , image=UpperCAmelCase_ , num_inference_steps=20 , guidance_scale=0 , generator=UpperCAmelCase_ , output_type='np' , ).images[0]
        _lowerCAmelCase : Optional[int] = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy' )
        assert np.abs((expected_image - image).mean() ) < 5E-2
    def a ( self ):
        """Upscale a fixed 512px input image and compare to the stored 1024px reference."""
        _lowerCAmelCase : Optional[int] = torch.manual_seed(33 )
        _lowerCAmelCase : List[str] = StableDiffusionLatentUpscalePipeline.from_pretrained(
            'stabilityai/sd-x2-latent-upscaler' , torch_dtype=torch.floataa )
        upscaler.to('cuda' )
        _lowerCAmelCase : Union[str, Any] = 'the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'
        _lowerCAmelCase : Dict = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png' )
        _lowerCAmelCase : int = upscaler(
            prompt=UpperCAmelCase_ , image=UpperCAmelCase_ , num_inference_steps=20 , guidance_scale=0 , generator=UpperCAmelCase_ , output_type='np' , ).images[0]
        _lowerCAmelCase : Optional[Any] = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy' )
        assert np.abs((expected_image - image).max() ) < 5E-2
| 711 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """Config tester specialization that also checks a MobileViTV2-specific field.

    NOTE(review): the body reads ``snake_case__``, which is not defined in this
    scope (the freshly built config is bound to ``_lowerCAmelCase``) — NameError
    at runtime until the identifiers are reconciled.
    """
    def a ( self ):
        """Instantiate the config and assert it exposes `width_multiplier`."""
        _lowerCAmelCase : int = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(snake_case__ , 'width_multiplier' ) )
class UpperCamelCase__ :
    """Test helper that builds tiny MobileViTV2 configs/inputs and runs shape checks.

    NOTE(review): ``__init__`` declares the parameter ``snake_case__`` many
    times (a SyntaxError), and every constructor argument is assigned to the
    local ``_lowerCAmelCase`` instead of an attribute on ``self`` — yet the
    other methods read ``self.batch_size``, ``self.image_size`` etc.  The
    identifiers look machine-mangled and must be reconciled before use.
    """
    def __init__( self , snake_case__ , snake_case__=13 , snake_case__=64 , snake_case__=2 , snake_case__=3 , snake_case__="swish" , snake_case__=3 , snake_case__=32 , snake_case__=0.1 , snake_case__=0.02 , snake_case__=True , snake_case__=True , snake_case__=10 , snake_case__=None , snake_case__=0.25 , snake_case__=0.0 , snake_case__=0.0 , ):
        """Store tiny-model hyperparameters (intended to land on ``self``)."""
        _lowerCAmelCase : Tuple = parent
        _lowerCAmelCase : Optional[int] = batch_size
        _lowerCAmelCase : List[Any] = image_size
        _lowerCAmelCase : List[Any] = patch_size
        _lowerCAmelCase : List[str] = num_channels
        # hidden size scales with width_multiplier, rounded to a multiple of 8
        _lowerCAmelCase : Optional[Any] = make_divisible(512 * width_multiplier , divisor=8 )
        _lowerCAmelCase : Optional[Any] = hidden_act
        _lowerCAmelCase : List[Any] = conv_kernel_size
        _lowerCAmelCase : Optional[Any] = output_stride
        _lowerCAmelCase : List[Any] = classifier_dropout_prob
        _lowerCAmelCase : str = use_labels
        _lowerCAmelCase : List[str] = is_training
        _lowerCAmelCase : Optional[int] = num_labels
        _lowerCAmelCase : List[str] = initializer_range
        _lowerCAmelCase : str = scope
        _lowerCAmelCase : Any = width_multiplier
        _lowerCAmelCase : Union[str, Any] = ffn_dropout
        _lowerCAmelCase : Optional[int] = attn_dropout
    def a ( self ):
        """Random pixel values plus (optional) classification/segmentation labels."""
        _lowerCAmelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        _lowerCAmelCase : Optional[Any] = None
        _lowerCAmelCase : Dict = None
        if self.use_labels:
            _lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
            _lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        _lowerCAmelCase : Optional[int] = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def a ( self ):
        """Build a MobileViTVaConfig from the stored hyperparameters."""
        return MobileViTVaConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
    def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
        """Forward the base model and check the last hidden state's 4D shape."""
        _lowerCAmelCase : Any = MobileViTVaModel(config=snake_case__ )
        model.to(snake_case__ )
        model.eval()
        _lowerCAmelCase : str = model(snake_case__ )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
    def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
        """Forward the classification head and check the logits shape."""
        _lowerCAmelCase : Optional[int] = self.num_labels
        _lowerCAmelCase : List[Any] = MobileViTVaForImageClassification(snake_case__ )
        model.to(snake_case__ )
        model.eval()
        _lowerCAmelCase : int = model(snake_case__ , labels=snake_case__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
        """Forward the segmentation head (with and without labels) and check shapes."""
        _lowerCAmelCase : List[Any] = self.num_labels
        _lowerCAmelCase : Optional[int] = MobileViTVaForSemanticSegmentation(snake_case__ )
        model.to(snake_case__ )
        model.eval()
        _lowerCAmelCase : Dict = model(snake_case__ )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
        _lowerCAmelCase : Any = model(snake_case__ , labels=snake_case__ )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
    def a ( self ):
        """Split prepared inputs into (config, inputs_dict) for the common tests."""
        _lowerCAmelCase : int = self.prepare_config_and_inputs()
        _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = config_and_inputs
        _lowerCAmelCase : Tuple = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
    """Common model-test suite for MobileViTV2 (base, classification, segmentation).

    NOTE(review): all class attributes are bound to the same name
    ``__magic_name__`` (each assignment overwrites the last), and several
    method bodies read ``snake_case__`` where the method takes only ``self``
    — machine-mangled identifiers, reconcile before running.
    """
    __magic_name__ = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    __magic_name__ = (
        {
            "feature-extraction": MobileViTVaModel,
            "image-classification": MobileViTVaForImageClassification,
            "image-segmentation": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    __magic_name__ = False
    __magic_name__ = False
    __magic_name__ = False
    __magic_name__ = False
    def a ( self ):
        """Set up the model tester and config tester (intended as setUp)."""
        _lowerCAmelCase : int = MobileViTVaModelTester(self )
        _lowerCAmelCase : Dict = MobileViTVaConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ )
    def a ( self ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()
    @unittest.skip(reason='MobileViTV2 does not use inputs_embeds' )
    def a ( self ):
        """Skipped: model consumes pixel values, not input embeddings."""
        pass
    @unittest.skip(reason='MobileViTV2 does not support input and output embeddings' )
    def a ( self ):
        """Skipped: no embedding layers to get/set."""
        pass
    @unittest.skip(reason='MobileViTV2 does not output attentions' )
    def a ( self ):
        """Skipped: architecture exposes no attention outputs."""
        pass
    @require_torch_multi_gpu
    @unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.' )
    def a ( self ):
        """Skipped: known multi-GPU CUDA issue."""
        pass
    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def a ( self ):
        """Skipped pending a smaller common-test model."""
        pass
    def a ( self ):
        """Each model's forward signature should start with `pixel_values`."""
        _lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _lowerCAmelCase : str = model_class(snake_case__ )
            _lowerCAmelCase : List[str] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _lowerCAmelCase : int = [*signature.parameters.keys()]
            _lowerCAmelCase : Any = ['pixel_values']
            self.assertListEqual(arg_names[:1] , snake_case__ )
    def a ( self ):
        """Base-model forward/shape check via the tester."""
        _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*snake_case__ )
    def a ( self ):
        """Hidden states: expect 5 feature maps, halving spatially each stage."""
        def check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ):
            _lowerCAmelCase : Dict = model_class(snake_case__ )
            model.to(snake_case__ )
            model.eval()
            with torch.no_grad():
                _lowerCAmelCase : Optional[Any] = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
            _lowerCAmelCase : List[str] = outputs.hidden_states
            _lowerCAmelCase : List[str] = 5
            self.assertEqual(len(snake_case__ ) , snake_case__ )
            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            _lowerCAmelCase : List[Any] = 2
            for i in range(len(snake_case__ ) ):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride , divisor // 2 )
        _lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _lowerCAmelCase : Optional[int] = True
            check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            _lowerCAmelCase : Any = True
            check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
    def a ( self ):
        """Classification-head forward/shape check via the tester."""
        _lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*snake_case__ )
    def a ( self ):
        """Segmentation-head forward/shape check via the tester."""
        _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*snake_case__ )
    @slow
    def a ( self ):
        """Loading the first published checkpoint should succeed."""
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowerCAmelCase : Dict = MobileViTVaModel.from_pretrained(snake_case__ )
            self.assertIsNotNone(snake_case__ )
def lowercase ():
    """Load the standard COCO cats fixture image used by the integration tests.

    The original bound the opened image to a throwaway local and returned the
    undefined name ``image`` (NameError); this returns the loaded image.
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
    """Slow integration tests for MobileViTV2 checkpoints on the COCO fixture image.

    NOTE(review): method bodies read ``snake_case__`` where the methods take
    only ``self``, and results are bound to ``_lowerCAmelCase`` then read back
    under other names — machine-mangled identifiers, reconcile before running.
    """
    @cached_property
    def a ( self ):
        """Image processor for the 1.0 ImageNet checkpoint (None without vision)."""
        return (
            MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' )
            if is_vision_available()
            else None
        )
    @slow
    def a ( self ):
        """Classification logits: shape (1, 1000) and first three values vs. reference."""
        _lowerCAmelCase : Union[str, Any] = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ).to(
            snake_case__ )
        _lowerCAmelCase : str = self.default_image_processor
        _lowerCAmelCase : Any = prepare_img()
        _lowerCAmelCase : Optional[int] = image_processor(images=snake_case__ , return_tensors='pt' ).to(snake_case__ )
        # forward pass
        with torch.no_grad():
            _lowerCAmelCase : Tuple = model(**snake_case__ )
        # verify the logits
        _lowerCAmelCase : Optional[Any] = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , snake_case__ )
        _lowerCAmelCase : Tuple = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] ).to(snake_case__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case__ , atol=1E-4 ) )
    @slow
    def a ( self ):
        """Segmentation logits: shape (1, 21, 32, 32) and a corner slice vs. reference."""
        _lowerCAmelCase : List[str] = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
        _lowerCAmelCase : Any = model.to(snake_case__ )
        _lowerCAmelCase : int = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
        _lowerCAmelCase : Optional[int] = prepare_img()
        _lowerCAmelCase : Dict = image_processor(images=snake_case__ , return_tensors='pt' ).to(snake_case__ )
        # forward pass
        with torch.no_grad():
            _lowerCAmelCase : int = model(**snake_case__ )
        _lowerCAmelCase : Dict = outputs.logits
        # verify the logits
        _lowerCAmelCase : str = torch.Size((1, 21, 32, 32) )
        self.assertEqual(logits.shape , snake_case__ )
        _lowerCAmelCase : Any = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ] , device=snake_case__ , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , snake_case__ , atol=1E-4 ) )
    @slow
    def a ( self ):
        """Post-processing: segmentation maps with and without target_sizes."""
        _lowerCAmelCase : Any = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
        _lowerCAmelCase : List[Any] = model.to(snake_case__ )
        _lowerCAmelCase : str = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
        _lowerCAmelCase : Tuple = prepare_img()
        _lowerCAmelCase : List[str] = image_processor(images=snake_case__ , return_tensors='pt' ).to(snake_case__ )
        # forward pass
        with torch.no_grad():
            _lowerCAmelCase : Any = model(**snake_case__ )
        _lowerCAmelCase : Optional[Any] = outputs.logits.detach().cpu()
        _lowerCAmelCase : Any = image_processor.post_process_semantic_segmentation(outputs=snake_case__ , target_sizes=[(50, 60)] )
        _lowerCAmelCase : List[Any] = torch.Size((50, 60) )
        self.assertEqual(segmentation[0].shape , snake_case__ )
        _lowerCAmelCase : List[str] = image_processor.post_process_semantic_segmentation(outputs=snake_case__ )
        _lowerCAmelCase : Tuple = torch.Size((32, 32) )
        self.assertEqual(segmentation[0].shape , snake_case__ )
| 630 | 0 |
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
# Non-default value for (almost) every PretrainedConfig kwarg; the tests below
# use it to verify that config attributes round-trip and that none silently
# keep their defaults.
# NOTE(review): the test methods reference this mapping as
# `config_common_kwargs`, but it is bound here to `lowerCAmelCase` — the two
# names must agree for the tests to run.
lowerCAmelCase : Any = {
    'return_dict': False,
    'output_hidden_states': True,
    'output_attentions': True,
    'torchscript': True,
    'torch_dtype': 'float16',
    'use_bfloat16': True,
    'tf_legacy_loss': True,
    'pruned_heads': {'a': 1},
    'tie_word_embeddings': False,
    'is_decoder': True,
    'cross_attention_hidden_size': 1_28,
    'add_cross_attention': True,
    'tie_encoder_decoder': True,
    'max_length': 50,
    'min_length': 3,
    'do_sample': True,
    'early_stopping': True,
    'num_beams': 3,
    'num_beam_groups': 3,
    'diversity_penalty': 0.5,
    'temperature': 2.0,
    'top_k': 10,
    'top_p': 0.7,
    'typical_p': 0.2,
    'repetition_penalty': 0.8,
    'length_penalty': 0.8,
    'no_repeat_ngram_size': 5,
    'encoder_no_repeat_ngram_size': 5,
    'bad_words_ids': [1, 2, 3],
    'num_return_sequences': 3,
    'chunk_size_feed_forward': 5,
    'output_scores': True,
    'return_dict_in_generate': True,
    'forced_bos_token_id': 2,
    'forced_eos_token_id': 3,
    'remove_invalid_values': True,
    'architectures': ['BertModel'],
    'finetuning_task': 'translation',
    'id2label': {0: 'label'},
    'label2id': {'label': '0'},
    'tokenizer_class': 'BertTokenizerFast',
    'prefix': 'prefix',
    'bos_token_id': 6,
    'pad_token_id': 7,
    'eos_token_id': 8,
    'sep_token_id': 9,
    'decoder_start_token_id': 10,
    'exponential_decay_length_penalty': (5, 1.01),
    'suppress_tokens': [0, 1],
    'begin_suppress_tokens': 2,
    'task_specific_params': {'translation': 'some_params'},
    'problem_type': 'regression',
}
@is_staging_test
class UpperCamelCase__ ( unittest.TestCase ):
    """Staging-hub tests: pushing configs via push_to_hub / save_pretrained.

    NOTE(review): the bodies pass ``UpperCamelCase__`` — the enclosing class
    name — as a value to hub helpers and assertions, and the auth token is
    bound to the local ``_lowerCAmelCase`` rather than ``cls._token`` which
    later methods read.  Machine-mangled identifiers; reconcile before running.
    """
    @classmethod
    def a ( cls ):
        """Save the staging token for the session (intended as setUpClass)."""
        _lowerCAmelCase : str = TOKEN
        HfFolder.save_token(UpperCamelCase__ )
    @classmethod
    def a ( cls ):
        """Best-effort removal of repos created by these tests (intended as tearDownClass)."""
        try:
            delete_repo(token=cls._token , repo_id='test-config' )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id='valid_org/test-config-org' )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id='test-dynamic-config' )
        except HTTPError:
            pass
    def a ( self ):
        """Round-trip a BertConfig through a user-namespace repo, both push paths."""
        _lowerCAmelCase : Union[str, Any] = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        config.push_to_hub('test-config' , use_auth_token=self._token )
        _lowerCAmelCase : Optional[int] = BertConfig.from_pretrained(F'{USER}/test-config' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(UpperCamelCase__ , getattr(UpperCamelCase__ , UpperCamelCase__ ) )
        # Reset repo
        delete_repo(token=self._token , repo_id='test-config' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(UpperCamelCase__ , repo_id='test-config' , push_to_hub=UpperCamelCase__ , use_auth_token=self._token )
        _lowerCAmelCase : Optional[int] = BertConfig.from_pretrained(F'{USER}/test-config' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(UpperCamelCase__ , getattr(UpperCamelCase__ , UpperCamelCase__ ) )
    def a ( self ):
        """Same round-trip against an organization-namespace repo."""
        _lowerCAmelCase : Any = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        config.push_to_hub('valid_org/test-config-org' , use_auth_token=self._token )
        _lowerCAmelCase : List[Any] = BertConfig.from_pretrained('valid_org/test-config-org' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(UpperCamelCase__ , getattr(UpperCamelCase__ , UpperCamelCase__ ) )
        # Reset repo
        delete_repo(token=self._token , repo_id='valid_org/test-config-org' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                UpperCamelCase__ , repo_id='valid_org/test-config-org' , push_to_hub=UpperCamelCase__ , use_auth_token=self._token )
        _lowerCAmelCase : Any = BertConfig.from_pretrained('valid_org/test-config-org' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(UpperCamelCase__ , getattr(UpperCamelCase__ , UpperCamelCase__ ) )
    def a ( self ):
        """Push a custom (dynamic) config class and reload it with trust_remote_code."""
        CustomConfig.register_for_auto_class()
        _lowerCAmelCase : List[Any] = CustomConfig(attribute=42 )
        config.push_to_hub('test-dynamic-config' , use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map , {'AutoConfig': 'custom_configuration.CustomConfig'} )
        _lowerCAmelCase : int = AutoConfig.from_pretrained(F'{USER}/test-dynamic-config' , trust_remote_code=UpperCamelCase__ )
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__ , 'CustomConfig' )
        self.assertEqual(new_config.attribute , 42 )
class UpperCamelCase__ ( unittest.TestCase ):
    """Unit tests for PretrainedConfig utilities (update_from_string, subfolder
    loading, offline fallback, versioned configuration files).

    NOTE(review): as elsewhere in this file, values are bound to
    ``_lowerCAmelCase`` and then read back under other names (``c``,
    ``configuration``, ``base_config`` …), and ``UpperCamelCase__`` — the
    class name — is passed as a value; reconcile before running.
    """
    def a ( self ):
        """update_from_string should update int/float/bool/str fields of GPT2Config."""
        _lowerCAmelCase : Dict = GPTaConfig()
        # attempt to modify each of int/float/bool/str config records and verify they were updated
        _lowerCAmelCase : Union[str, Any] = c.n_embd + 1  # int
        _lowerCAmelCase : Dict = c.resid_pdrop + 1.0  # float
        _lowerCAmelCase : Optional[int] = not c.scale_attn_weights  # bool
        _lowerCAmelCase : int = c.summary_type + '''foo'''  # str
        c.update_from_string(
            F'n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}' )
        self.assertEqual(UpperCamelCase__ , c.n_embd , 'mismatch for key: n_embd' )
        self.assertEqual(UpperCamelCase__ , c.resid_pdrop , 'mismatch for key: resid_pdrop' )
        self.assertEqual(UpperCamelCase__ , c.scale_attn_weights , 'mismatch for key: scale_attn_weights' )
        self.assertEqual(UpperCamelCase__ , c.summary_type , 'mismatch for key: summary_type' )
    def a ( self ):
        """config_common_kwargs must cover every PretrainedConfig kwarg with a
        non-default value (guards against silently missing keys)."""
        _lowerCAmelCase : Union[str, Any] = PretrainedConfig()
        _lowerCAmelCase : Optional[int] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to addin config_common_kwargs above.
        self.assertListEqual(
            UpperCamelCase__ , ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
        _lowerCAmelCase : Optional[int] = [key for key, value in config_common_kwargs.items() if value == getattr(UpperCamelCase__ , UpperCamelCase__ )]
        if len(UpperCamelCase__ ) > 0:
            raise ValueError(
                'The following keys are set with the default values in'
                ' `test_configuration_common.config_common_kwargs` pick another value for them:'
                F' {", ".join(UpperCamelCase__ )}.' )
    def a ( self ):
        """Loading from a repo subfolder requires passing `subfolder`."""
        with self.assertRaises(UpperCamelCase__ ):
            # config is in subfolder, the following should not work without specifying the subfolder
            _lowerCAmelCase : List[Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
        _lowerCAmelCase : Tuple = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' , subfolder='bert' )
        self.assertIsNotNone(UpperCamelCase__ )
    def a ( self ):
        """With the server returning 500, from_pretrained should fall back to cache."""
        _lowerCAmelCase : List[str] = mock.Mock()
        _lowerCAmelCase : int = 500
        _lowerCAmelCase : str = {}
        _lowerCAmelCase : Any = HTTPError
        _lowerCAmelCase : Optional[Any] = {}
        # Download this model to make sure it's in the cache.
        _lowerCAmelCase : Dict = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('requests.Session.request' , return_value=UpperCamelCase__ ) as mock_head:
            _lowerCAmelCase : str = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
            # This check we did call the fake head request
            mock_head.assert_called()
    def a ( self ):
        """A config can be loaded from a direct URL to config.json."""
        _lowerCAmelCase : Optional[Any] = BertConfig.from_pretrained(
            'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
    def a ( self ):
        """Versioned config files: pick the newest file whose version <= ours."""
        _lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained('bert-base-cased' )
        _lowerCAmelCase : Tuple = ['''config.4.0.0.json''']
        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(UpperCamelCase__ )
            _lowerCAmelCase : Optional[Any] = 2
            json.dump(configuration.to_dict() , open(os.path.join(UpperCamelCase__ , 'config.4.0.0.json' ) , 'w' ) )
            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            _lowerCAmelCase : List[Any] = AutoConfig.from_pretrained(UpperCamelCase__ )
            self.assertEqual(new_configuration.hidden_size , 2 )
            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            _lowerCAmelCase : Any = ['''config.42.0.0.json''']
            _lowerCAmelCase : Dict = 768
            configuration.save_pretrained(UpperCamelCase__ )
            shutil.move(os.path.join(UpperCamelCase__ , 'config.4.0.0.json' ) , os.path.join(UpperCamelCase__ , 'config.42.0.0.json' ) )
            _lowerCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(UpperCamelCase__ )
            self.assertEqual(new_configuration.hidden_size , 768 )
    def a ( self ):
        """Versioned config selection respects the (monkey-patched) library version."""
        _lowerCAmelCase : List[str] = '''hf-internal-testing/test-two-configs'''
        import transformers as new_transformers
        _lowerCAmelCase : int = '''v4.0.0'''
        _lowerCAmelCase : Dict = new_transformers.models.auto.AutoConfig.from_pretrained(
            UpperCamelCase__ , return_unused_kwargs=UpperCamelCase__ )
        self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks `_configuration_file` ia not kept in the kwargs by mistake.
        self.assertDictEqual(UpperCamelCase__ , {} )
        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers
        _lowerCAmelCase : Optional[int] = '''v3.0.0'''
        _lowerCAmelCase : Dict = old_transformers.models.auto.AutoConfig.from_pretrained(UpperCamelCase__ )
        self.assertEqual(old_configuration.hidden_size , 768 )
| 712 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class UpperCamelCase__ ( unittest.TestCase ):
    """Slow integration test for FlaxXLMRobertaModel: runs a reference
    sentence through xlm-roberta-base and checks the output shape plus a
    slice of the last hidden state against golden values.

    NOTE(review): the expected-slice comparison reads `output[:, :, -1]`
    against a 12-value row — presumably the last embedding channel across
    the 12 tokens; confirm against the upstream test before editing values.
    """

    @slow
    def a ( self ):
        """Forward pass of the pretrained Flax model on one sentence."""
        _lowerCAmelCase : List[str] = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' )
        _lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained('xlm-roberta-base' )
        _lowerCAmelCase : Dict = 'The dog is cute and lives in the garden house'
        # NOTE(review): `snake_case__` is read here but never bound in this
        # method — mangled identifier; originally the encoded text/inputs.
        _lowerCAmelCase : List[str] = jnp.array([tokenizer.encode(snake_case__ )] )
        _lowerCAmelCase : Optional[int] = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim
        _lowerCAmelCase : Tuple = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
        _lowerCAmelCase : Union[str, Any] = model(snake_case__ )['last_hidden_state']
        self.assertEqual(output.shape , snake_case__ )
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1] , snake_case__ , atol=1E-3 ) )
| 630 | 0 |
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
# Module logger and the shared config docstring name for the Flax MT5 stubs.
# NOTE(review): both constants are bound to the same throwaway name
# `lowerCAmelCase`, so the second assignment clobbers the logger binding —
# an artifact of an automated rename (originally `logger` and `_CONFIG_FOR_DOC`).
lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
lowerCAmelCase : Dict = """T5Config"""
def lowercase (input_ids , pad_token_id , decoder_start_token_id ):
    """Shift `input_ids` one position to the right for decoder teacher forcing.

    The first position is filled with `decoder_start_token_id`, and any label
    padding value of -100 carried over from the labels is replaced with
    `pad_token_id`.

    Fixes the mangled original, whose signature repeated the parameter name
    `_A` (a SyntaxError) while the body read the undefined `__UpperCamelCase`.

    :param input_ids: integer array of shape (batch, seq_len)
    :param pad_token_id: id used to replace -100 label padding
    :param decoder_start_token_id: id placed at position 0 of every row
    :return: right-shifted array with the same shape as `input_ids`
    """
    shifted_input_ids = jnp.zeros_like(input_ids )
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id )
    # -100 is the ignore-index used in labels; it must not reach the embedding.
    shifted_input_ids = jnp.where(shifted_input_ids == -1_0_0 , pad_token_id , shifted_input_ids )
    return shifted_input_ids
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """Flax MT5 model stub: reuses the T5 implementation, overriding only the
    model type and config class.

    NOTE(review): both class attributes are named `__magic_name__`, so the
    second assignment clobbers the first — mangled from the original
    `model_type = "mt5"` / `config_class = MTaConfig` pair.
    """
    __magic_name__ = "mt5"
    __magic_name__ = MTaConfig
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """Flax MT5 encoder-only stub reusing the T5 base class.

    NOTE(review): duplicated `__magic_name__` attribute — see sibling class;
    the dict assignment clobbers the string (originally `model_type` and
    `config_class`).
    """
    __magic_name__ = "mt5"
    __magic_name__ = MTaConfig
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """Flax MT5 conditional-generation stub reusing the T5 base class.

    NOTE(review): duplicated `__magic_name__` attribute — the second
    assignment clobbers the first (originally `model_type` / `config_class`).
    """
    __magic_name__ = "mt5"
    __magic_name__ = MTaConfig
| 713 |
'''simple docstring'''
def lowercase (_A ):
    """Sort a sequence in ascending order with pancake sort.

    Each round finds the maximum of the unsorted prefix, flips it to the
    front, then flips the whole prefix so the maximum lands in its final
    position. Fixes the mangled original, whose locals (`cur`, `mi`, `arr`)
    were never bound — every assignment went to the throwaway
    `_lowerCAmelCase`, so the loop raised NameError immediately.

    :param _A: iterable of mutually comparable items (not mutated)
    :return: new sorted list
    """
    arr = list(_A )
    cur = len(arr )
    while cur > 1:
        # Index of the maximum element within the unsorted prefix arr[:cur].
        mi = arr.index(max(arr[0:cur] ) )
        # Flip arr[:mi+1] so the maximum moves to the front.
        arr = arr[mi::-1] + arr[mi + 1 : len(arr )]
        # Flip the whole unsorted prefix, sending the maximum to slot cur-1.
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr )]
        cur -= 1
    return arr
if __name__ == "__main__":
    # Interactive demo: read comma-separated integers and print them sorted.
    # NOTE(review): calls `pancake_sort`, but the function above is named
    # `lowercase` — mangled rename; this guard raises NameError as written.
    lowerCAmelCase : Tuple = input("""Enter numbers separated by a comma:\n""").strip()
    lowerCAmelCase : Tuple = [int(item) for item in user_input.split(""",""")]
    print(pancake_sort(unsorted))
| 630 | 0 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
# Pegasus (TF) -> HF substring rewrite rules, applied in order by the
# key-renaming helper below (replace left with right; the state dict is
# otherwise identical to BART's). Renamed from the mangled throwaway
# binding `lowerCAmelCase` — the helper reads this list as `PATTERNS`.
PATTERNS = [
    # replace left string with right string to get the relevant state_dict key (identical state dict to bart)
    ["memory_attention", "encoder_attn"],
    ["attention", "attn"],
    ["/", "."],
    [".LayerNorm.gamma", "_layer_norm.weight"],
    [".LayerNorm.beta", "_layer_norm.bias"],
    ["r.layer_", "r.layers."],
    ["output_proj", "out_proj"],
    ["ffn.dense_1.", "fc2."],
    ["ffn.dense.", "fc1."],
    ["ffn_layer_norm", "final_layer_norm"],
    ["kernel", "weight"],
    ["encoder_layer_norm.", "encoder.layer_norm."],
    ["decoder_layer_norm.", "decoder.layer_norm."],
    ["embeddings.weights", "shared.weight"],
]
def lowercase (_A ):
    """Translate one Pegasus (TF) state-dict key to its HF PyTorch name.

    Applies each [pegasus_substring, hf_substring] rule from the module-level
    PATTERNS list, in order. Fixes the mangled original, which replaced on
    the undefined name `k` while ignoring its parameter `_A`.

    :param _A: original TF variable name
    :return: the renamed key
    """
    for pegasus_name, hf_name in PATTERNS:
        _A = _A.replace(pegasus_name , hf_name )
    return _A
def lowercase (tf_weights , cfg_updates ):
    """Build a PegasusForConditionalGeneration and load TF weights into it.

    Reconstructed from the mangled original, whose signature repeated `_A`
    (a SyntaxError) and whose locals were never bound. Parameter names are
    restored; the function is still called positionally by the driver below.

    :param tf_weights: dict of {tf_variable_name: numpy array}
    :param cfg_updates: config overrides merged on top of DEFAULTS
    :return: the populated PyTorch model
    :raises ValueError: if a renamed TF key has no counterpart in the model
    """
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates )
    cfg = PegasusConfig(**cfg_kwargs )
    torch_model = PegasusForConditionalGeneration(cfg )
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k )
        if new_k not in sd:
            raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
        # TF stores dense/projection kernels transposed relative to PyTorch.
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v , dtype=sd[new_k].dtype )
        assert v.shape == sd[new_k].shape, f'{new_k}, {k}, {v.shape}, {sd[new_k].shape}'
    # make sure embedding.padding_idx is respected
    mapping['shared.weight'][cfg.pad_token_id] = torch.zeros_like(mapping['shared.weight'][cfg.pad_token_id + 1] )
    # Encoder/decoder token embeddings are tied to the shared embedding.
    mapping['encoder.embed_tokens.weight'] = mapping['shared.weight']
    mapping['decoder.embed_tokens.weight'] = mapping['shared.weight']
    # TF Pegasus has no biases for these params; fill with zeros.
    empty_biases = {k: torch.zeros_like(v ) for k, v in sd.items() if k.endswith('bias' ) and k not in mapping}
    mapping.update(**empty_biases )
    missing, extra = torch_model.model.load_state_dict(mapping , strict=False )
    # Static sinusoidal position embeddings are expected to be "missing".
    unexpected_missing = [
        k for k in missing if k not in ['encoder.embed_positions.weight', 'decoder.embed_positions.weight']
    ]
    assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}'
    assert extra == [], f'no matches found for the following tf keys {extra}'
    return torch_model
def lowercase (_A="./ckpt/aeslc/model.ckpt-32000" ):
    """Read every model variable from a TF checkpoint into a dict of numpy arrays.

    Optimizer slots ("Adafactor") and the step counter ("global_step") are
    skipped. Fixes the mangled original, whose locals (`init_vars`,
    `tf_weights`, `ignore_name`, `skip_key`, `array`) were never bound.

    :param _A: path/prefix of the TF checkpoint
    :return: {variable_name: numpy array}
    """
    init_vars = tf.train.list_variables(_A )
    tf_weights = {}
    ignore_name = ['Adafactor', 'global_step']
    for name, shape in tqdm(init_vars , desc='converting tf checkpoint to dict' ):
        # Non-model variables would have no counterpart in the torch model.
        if any(pat in name for pat in ignore_name ):
            continue
        array = tf.train.load_variable(_A , name )
        tf_weights[name] = array
    return tf_weights
def lowercase (ckpt_path , save_dir ):
    """End-to-end conversion of one Pegasus TF checkpoint to a HF PyTorch repo.

    Saves a tokenizer sized for the dataset, converts the weights, and writes
    the model to `save_dir`. Reconstructed from the mangled original, whose
    signature repeated `_A` (a SyntaxError) and whose locals were never bound.

    :param ckpt_path: TF checkpoint path; its parent directory names the dataset
    :param save_dir: output directory for tokenizer + model files
    """
    dataset = Path(ckpt_path ).parent.name
    desired_max_model_length = task_specific_params[f'summarization_{dataset}']['max_position_embeddings']
    tok = PegasusTokenizer.from_pretrained('sshleifer/pegasus' , model_max_length=desired_max_model_length )
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir )
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path )
    cfg_updates = task_specific_params[f'summarization_{dataset}']
    if dataset == "large":
        # The "large" checkpoint carries the full per-task parameter table.
        cfg_updates['task_specific_params'] = task_specific_params
    torch_model = convert_pegasus(tf_weights , cfg_updates )
    torch_model.save_pretrained(save_dir )
    sd = torch_model.state_dict()
    # Positional embeddings are static sinusoids — no need to store them.
    sd.pop('model.decoder.embed_positions.weight' )
    sd.pop('model.encoder.embed_positions.weight' )
    torch.save(sd , Path(save_dir ) / 'pytorch_model.bin' )
if __name__ == "__main__":
    # CLI entry point: convert the given TF checkpoint; default save_dir is
    # pegasus/<dataset> derived from the checkpoint's parent directory.
    # NOTE(review): mangled — parsed args and the dataset name are bound to
    # throwaway names (`lowerCAmelCase`), yet `args`/`dataset` are read below,
    # and `convert_pegasus_ckpt_to_pytorch` does not exist under that name
    # in this file (the functions above are all named `lowercase`).
    lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("""tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""")
    parser.add_argument("""save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""")
    lowerCAmelCase : Dict = parser.parse_args()
    if args.save_dir is None:
        lowerCAmelCase : int = Path(args.tf_ckpt_path).parent.name
        lowerCAmelCase : Optional[Any] = os.path.join("""pegasus""", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 714 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
# Pretrained GPT-J checkpoint -> hosted config.json URL.
lowerCAmelCase : str = {
    """EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """Configuration class for GPT-J models; defaults match EleutherAI/gpt-j-6B.

    Fixes two defects in the mangled original: every `__init__` parameter was
    named `snake_case__` (duplicate parameter names are a SyntaxError), and
    all attribute assignments went to the throwaway local `_lowerCAmelCase`
    instead of `self`, so no configuration value was ever stored.
    """
    __magic_name__ = "gptj"
    __magic_name__ = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=5_0400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1E-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=5_0256,
        eos_token_id=5_0256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        """Store the architecture hyper-parameters and forward the token ids
        and tying flag to the base config."""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs )
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """ONNX export configuration for GPT-J (OnnxConfigWithPast pattern):
    declares dynamic axes, past-key-value handling, and dummy inputs.

    NOTE(review): this block is machine-mangled — `__init__` repeats the
    parameter name `snake_case__` (a SyntaxError), and many locals are
    assigned to `_lowerCAmelCase` while later lines read the intended names
    (`common_inputs`, `batch`, `seqlen`, `ordered_inputs`). Left byte-identical;
    it must be re-derived from the upstream GPT-J OnnxConfig before use.
    """

    def __init__( self , snake_case__ , snake_case__ = "default" , snake_case__ = None , snake_case__ = False , ):
        """Wraps a model config; ensures pad_token_id exists (defaults to 0)."""
        super().__init__(snake_case__ , task=snake_case__ , patching_specs=snake_case__ , use_past=snake_case__ )
        if not getattr(self._config , 'pad_token_id' , snake_case__ ):
            # TODO: how to do that better?
            _lowerCAmelCase : Any = 0

    @property
    def a ( self ):
        """Dynamic-axis declaration for the graph inputs; extends the
        sequence axis when past key values are exported."""
        _lowerCAmelCase : str = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
        if self.use_past:
            self.fill_with_past_key_values_(snake_case__ , direction='inputs' )
            _lowerCAmelCase : int = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            _lowerCAmelCase : int = {0: 'batch', 1: 'sequence'}
        return common_inputs

    @property
    def a ( self ):
        """Number of transformer layers (past key values per layer)."""
        return self._config.n_layer

    @property
    def a ( self ):
        """Number of attention heads (shape of past key values)."""
        return self._config.n_head

    def a ( self , snake_case__ , snake_case__ = -1 , snake_case__ = -1 , snake_case__ = False , snake_case__ = None , ):
        """Builds dummy inputs for export; adds zero-filled past key values
        and a lengthened attention mask when use_past is set."""
        _lowerCAmelCase : Optional[Any] = super(snake_case__ , self ).generate_dummy_inputs(
            snake_case__ , batch_size=snake_case__ , seq_length=snake_case__ , is_pair=snake_case__ , framework=snake_case__ )
        # We need to order the input in the way they appears in the forward()
        _lowerCAmelCase : Any = OrderedDict({'input_ids': common_inputs['input_ids']} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
            else:
                import torch

                _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                _lowerCAmelCase : Any = seqlen + 2
                _lowerCAmelCase : Optional[int] = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                _lowerCAmelCase : Tuple = [
                    (torch.zeros(snake_case__ ), torch.zeros(snake_case__ )) for _ in range(self.num_layers )
                ]
        _lowerCAmelCase : Tuple = common_inputs['attention_mask']
        if self.use_past:
            _lowerCAmelCase : Any = ordered_inputs['attention_mask'].dtype
            _lowerCAmelCase : Union[str, Any] = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(snake_case__ , snake_case__ , dtype=snake_case__ )] , dim=1 )
        return ordered_inputs

    @property
    def a ( self ):
        """Default ONNX opset version used for export."""
        return 13
| 630 | 0 |
'''simple docstring'''
def lowercase (_A ):
    """Target function for the bisection demo: f(x) = 10 - x**2.

    Fixes the mangled original, which returned `1_0 - x * x` with `x`
    undefined (the parameter is `_A`).
    """
    return 1_0 - _A * _A
def lowercase (a , b ):
    """Approximate a root of `equation` in [a, b] via bisection (tolerance 0.01).

    Reconstructed from the mangled original: the signature repeated `_A`
    (a SyntaxError), calls passed the undefined `SCREAMING_SNAKE_CASE_`, and
    `a`/`b`/`c` were never rebound. NOTE(review): this def shadows the
    `equation` helper's own name `lowercase` in the original file — the two
    functions were clearly meant to carry distinct names.

    :param a: left bracket endpoint
    :param b: right bracket endpoint
    :return: midpoint approximation of the root
    :raises ValueError: if equation(a) and equation(b) do not differ in sign
    """
    if equation(a ) * equation(b ) >= 0:
        raise ValueError('Wrong space!' )
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c ) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c ) * equation(a ) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Demo: roots of f(x) = 10 - x^2 bracketed in [-2, 5] and [0, 6].
    # NOTE(review): calls `bisection`, but both functions above are named
    # `lowercase` after the automated rename — this raises NameError as written.
    print(bisection(-2, 5))
    print(bisection(0, 6))
| 715 |
'''Lazy-import scaffolding for the X-CLIP model family.'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Submodule name -> public symbols; consumed by _LazyModule below.
# Restores the `_import_structure` binding the mangled original lost (the
# dict and the torch-only symbol list were assigned to throwaway names, and
# the final sys.modules replacement was dropped).
_import_structure = {
    "configuration_x_clip": [
        "XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XCLIPConfig",
        "XCLIPTextConfig",
        "XCLIPVisionConfig",
    ],
    "processing_x_clip": ["XCLIPProcessor"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: simply do not expose the modeling symbols.
    pass
else:
    _import_structure["modeling_x_clip"] = [
        "XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XCLIPModel",
        "XCLIPPreTrainedModel",
        "XCLIPTextModel",
        "XCLIPVisionModel",
    ]
if TYPE_CHECKING:
    # Static imports for type checkers only; never executed at runtime.
    from .configuration_x_clip import (
        XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XCLIPConfig,
        XCLIPTextConfig,
        XCLIPVisionConfig,
    )
    from .processing_x_clip import XCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_x_clip import (
            XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            XCLIPModel,
            XCLIPPreTrainedModel,
            XCLIPTextModel,
            XCLIPVisionModel,
        )
else:
    import sys

    # Replace this module with a proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 630 | 0 |
'''simple docstring'''
import json
import os
import torch
from diffusers import UNetaDModel
# Create the output layout for the converted hopper-medium-v2 checkpoints
# (two U-Net horizons plus the value function).
os.makedirs("""hub/hopper-medium-v2/unet/hor32""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/unet/hor128""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/value_function""", exist_ok=True)
def lowercase (_A ):
    """Convert a diffuser temporal U-Net checkpoint (horizon 32 or 128) into a
    diffusers UNet1DModel and write weights + config under hub/hopper-medium-v2.

    NOTE(review): machine-mangled — the parameter `_A` is unused while the
    body reads `hor`, and `model`, `state_dict`, `config`, `hf_value_function`
    and `mapping` are read but only ever assigned to the throwaway
    `_lowerCAmelCase`. Left byte-identical; needs its bindings restored.
    The key mapping by zip() relies on both state dicts sharing key order —
    presumably true for this pair of models, but worth verifying.
    """
    if hor == 1_2_8:
        _lowerCAmelCase : int = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        _lowerCAmelCase : Tuple = (3_2, 1_2_8, 2_5_6)
        _lowerCAmelCase : List[str] = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 3_2:
        _lowerCAmelCase : Union[str, Any] = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        _lowerCAmelCase : str = (3_2, 6_4, 1_2_8, 2_5_6)
        _lowerCAmelCase : Union[str, Any] = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    _lowerCAmelCase : List[Any] = torch.load(f'/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch' )
    _lowerCAmelCase : Optional[int] = model.state_dict()
    _lowerCAmelCase : Union[str, Any] = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 1_4,
        "out_channels": 1_4,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 6_5_5_3_6,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    _lowerCAmelCase : int = UNetaDModel(**_lowerCAmelCase )
    print(f'length of state dict: {len(state_dict.keys() )}' )
    print(f'length of value function dict: {len(hf_value_function.state_dict().keys() )}' )
    _lowerCAmelCase : List[Any] = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
    for k, v in mapping.items():
        _lowerCAmelCase : Tuple = state_dict.pop(_lowerCAmelCase )
    hf_value_function.load_state_dict(_lowerCAmelCase )
    torch.save(hf_value_function.state_dict() , f'hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin' )
    with open(f'hub/hopper-medium-v2/unet/hor{hor}/config.json' , 'w' ) as f:
        json.dump(_lowerCAmelCase , _lowerCAmelCase )
def lowercase ():
    """Convert the hopper-medium-v2 value-function checkpoint into a diffusers
    UNet1DModel and write weights + config under hub/hopper-medium-v2.

    NOTE(review): machine-mangled like `unet` above — `config`, `state_dict`,
    `hf_value_function` and `mapping` are read but only ever assigned to
    `_lowerCAmelCase`. Left byte-identical pending reconstruction. Also note
    this def shadows the previous `lowercase`, so only one conversion function
    survives at module level.
    """
    _lowerCAmelCase : Tuple = {
        "in_channels": 1_4,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (3_2, 6_4, 1_2_8, 2_5_6),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 6_5_5_3_6,
        "out_channels": 1_4,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }
    _lowerCAmelCase : Tuple = torch.load('/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch' )
    _lowerCAmelCase : int = model
    _lowerCAmelCase : Tuple = UNetaDModel(**_lowerCAmelCase )
    print(f'length of state dict: {len(state_dict.keys() )}' )
    print(f'length of value function dict: {len(hf_value_function.state_dict().keys() )}' )
    _lowerCAmelCase : List[str] = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
    for k, v in mapping.items():
        _lowerCAmelCase : Union[str, Any] = state_dict.pop(_lowerCAmelCase )
    hf_value_function.load_state_dict(_lowerCAmelCase )
    torch.save(hf_value_function.state_dict() , 'hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin' )
    with open('hub/hopper-medium-v2/value_function/config.json' , 'w' ) as f:
        json.dump(_lowerCAmelCase , _lowerCAmelCase )
if __name__ == "__main__":
    # Convert the horizon-32 U-Net and the value function (hor128 disabled).
    # NOTE(review): `unet` / `value_function` do not exist under those names
    # here — both defs above are named `lowercase` after the automated rename.
    unet(32)
    # unet(128)
    value_function()
| 716 |
'''simple docstring'''
import math
from datetime import datetime, timedelta
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : Optional[Any] = year % 1_9
_lowerCAmelCase : Any = year % 4
_lowerCAmelCase : Optional[int] = year % 7
_lowerCAmelCase : int = math.floor(year / 1_0_0 )
_lowerCAmelCase : Dict = math.floor((1_3 + 8 * leap_day_inhibits) / 2_5 )
_lowerCAmelCase : Optional[Any] = leap_day_inhibits / 4
_lowerCAmelCase : Dict = (
1_5 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 3_0
_lowerCAmelCase : List[Any] = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
_lowerCAmelCase : Dict = (1_9 * metonic_cycle + secular_moon_shift) % 3_0
# PHM -> Paschal Full Moon
_lowerCAmelCase : Union[str, Any] = (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
if days_to_add == 2_9 and days_from_phm_to_sunday == 6:
return datetime(_A , 4 , 1_9 )
elif days_to_add == 2_8 and days_from_phm_to_sunday == 6:
return datetime(_A , 4 , 1_8 )
else:
return datetime(_A , 3 , 2_2 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
    # Print Easter dates for a handful of years, with past/future phrasing.
    # NOTE(review): `gauss_easter` does not exist under that name here —
    # the function above is named `lowercase` after the automated rename.
    for year in (19_94, 20_00, 20_10, 20_21, 20_23):
        lowerCAmelCase : List[str] = """will be""" if year > datetime.now().year else """was"""
        print(F'''Easter in {year} {tense} {gauss_easter(year)}''')
| 630 | 0 |
'''Lazy-import scaffolding for the DistilBERT model family.'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Submodule name -> public symbols; consumed by _LazyModule below.
# Restores the `_import_structure` binding the mangled original lost: the
# dict and every backend-gated symbol list were assigned to throwaway names,
# and the final sys.modules replacement was dropped.
_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]
if TYPE_CHECKING:
    # Static imports for type checkers only; never executed at runtime.
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 717 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class UpperCamelCase__ ( unittest.TestCase ):
    """Unit tests for the greedy knapsack helper `kp.calc_profit`.

    NOTE(review): the negative-path tests pass `snake_case__` — which is
    never bound — as the expected exception class, and no longer invoke
    `kp.calc_profit` with the offending arguments; mangled from the original
    assertRaisesRegex calls. Left byte-identical.
    """

    def a ( self ):
        """Happy path: best greedy profit for this instance is 210."""
        _lowerCAmelCase : Union[str, Any] = [10, 20, 30, 40, 50, 60]
        _lowerCAmelCase : Union[str, Any] = [2, 4, 6, 8, 10, 12]
        _lowerCAmelCase : Dict = 100
        self.assertEqual(kp.calc_profit(snake_case__ , snake_case__ , snake_case__ ) , 210 )

    def a ( self ):
        """max_weight <= 0 must be rejected."""
        self.assertRaisesRegex(snake_case__ , 'max_weight must greater than zero.' )

    def a ( self ):
        """Negative weights must be rejected."""
        self.assertRaisesRegex(snake_case__ , 'Weight can not be negative.' )

    def a ( self ):
        """Negative profits must be rejected."""
        self.assertRaisesRegex(snake_case__ , 'Profit can not be negative.' )

    def a ( self ):
        """Zero max_weight must be rejected."""
        self.assertRaisesRegex(snake_case__ , 'max_weight must greater than zero.' )

    def a ( self ):
        """Profit/weight lists of different lengths must be rejected."""
        self.assertRaisesRegex(
            snake_case__ , 'The length of profit and weight must be same.' )
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| 630 | 0 |
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
lowerCAmelCase : List[Any] = random.Random()
def lowercase (shape , scale=1.0 , rng=None , name=None ):
    """Build a 2-D fixture of random floats in [0, scale).

    Fixes the mangled original, whose signature repeated `_A` four times
    (a SyntaxError) while the body read the unbound `shape`, `values`,
    `scale` and left `rng` unassigned when falling back to the global RNG.

    :param shape: (rows, cols) pair
    :param scale: upper bound multiplier for each value
    :param rng: optional random.Random; defaults to the module RNG
    :param name: unused, kept for signature compatibility with callers
    :return: list of `rows` lists, each holding `cols` floats
    """
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
@require_torchaudio
class UpperCamelCase__ ( unittest.TestCase ):
    """Fixture/helper class for WhisperFeatureExtractor tests: holds the
    extractor hyper-parameters and builds variable-length speech inputs."""

    def __init__( self , snake_case__ , snake_case__=7 , snake_case__=400 , snake_case__=2000 , snake_case__=10 , snake_case__=160 , snake_case__=8 , snake_case__=0.0 , snake_case__=4000 , snake_case__=False , snake_case__=True , ):
        """Store fixture parameters.

        NOTE(review): every parameter is named `snake_case__` (duplicate
        parameter names are a SyntaxError) and the stored values are clearly
        meant to be distinct (parent, batch_size, min/max_seq_length, ...);
        mangled from the upstream tester. Left byte-identical.
        """
        _lowerCAmelCase : int = parent
        _lowerCAmelCase : Union[str, Any] = batch_size
        _lowerCAmelCase : Optional[int] = min_seq_length
        _lowerCAmelCase : Any = max_seq_length
        _lowerCAmelCase : str = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        _lowerCAmelCase : Tuple = padding_value
        _lowerCAmelCase : Union[str, Any] = sampling_rate
        _lowerCAmelCase : List[str] = return_attention_mask
        _lowerCAmelCase : Dict = do_normalize
        _lowerCAmelCase : Optional[Any] = feature_size
        _lowerCAmelCase : Optional[int] = chunk_length
        _lowerCAmelCase : List[Any] = hop_length

    def a ( self ):
        """Return the kwargs dict used to construct a WhisperFeatureExtractor."""
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def a ( self , snake_case__=False , snake_case__=False ):
        """Create a batch of speech inputs of increasing length (or equal
        length), optionally converted to numpy arrays.

        NOTE(review): duplicate `snake_case__` parameters again, and the body
        converts via `lowerCamelCase__`, which is never bound — mangled from
        the upstream (equal_length, numpify) helper.
        """
        def _flatten(snake_case__ ):
            return list(itertools.chain(*lowerCamelCase__ ) )

        if equal_length:
            _lowerCAmelCase : Tuple = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            _lowerCAmelCase : Optional[Any] = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            _lowerCAmelCase : Tuple = [np.asarray(lowerCamelCase__ ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class UpperCamelCase__ ( UpperCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = WhisperFeatureExtractor if is_speech_available() else None
def a ( self ):
    """Create the shared fixture helper.

    NOTE(review): `WhisperFeatureExtractionTester` is not defined under that
    name here — the tester class above was renamed to `UpperCamelCase__`.
    """
    _lowerCAmelCase : List[str] = WhisperFeatureExtractionTester(self )
def a ( self ):
    """Round-trip the feature extractor through save_pretrained /
    from_pretrained and check the dict and mel filter bank survive.

    NOTE(review): the round-trip argument `lowerCamelCase__` is never bound
    in this method (mangled; originally the tmp dir / loaded objects).
    """
    _lowerCAmelCase : str = self.feature_extraction_class(**self.feat_extract_dict )
    with tempfile.TemporaryDirectory() as tmpdirname:
        _lowerCAmelCase : Dict = feat_extract_first.save_pretrained(lowerCamelCase__ )[0]
        check_json_file_has_correct_format(lowerCamelCase__ )
        _lowerCAmelCase : int = self.feature_extraction_class.from_pretrained(lowerCamelCase__ )
    _lowerCAmelCase : Dict = feat_extract_first.to_dict()
    _lowerCAmelCase : Any = feat_extract_second.to_dict()
    _lowerCAmelCase : List[Any] = feat_extract_first.mel_filters
    _lowerCAmelCase : Dict = feat_extract_second.mel_filters
    self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ ) )
    self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def a ( self ):
    """Round-trip the feature extractor through to_json_file /
    from_json_file and compare dicts and mel filter banks.

    NOTE(review): same mangling as the save_pretrained test — the
    `lowerCamelCase__` argument is never bound in this method.
    """
    _lowerCAmelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
    with tempfile.TemporaryDirectory() as tmpdirname:
        _lowerCAmelCase : Dict = os.path.join(lowerCamelCase__ , 'feat_extract.json' )
        feat_extract_first.to_json_file(lowerCamelCase__ )
        _lowerCAmelCase : Tuple = self.feature_extraction_class.from_json_file(lowerCamelCase__ )
    _lowerCAmelCase : Tuple = feat_extract_first.to_dict()
    _lowerCAmelCase : Optional[Any] = feat_extract_second.to_dict()
    _lowerCAmelCase : Optional[int] = feat_extract_first.mel_filters
    _lowerCAmelCase : Tuple = feat_extract_second.mel_filters
    self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ ) )
    self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def a ( self ):
    """Exercise __call__ on lists, numpy arrays, batched/unbatched inputs and
    truncation, checking output shape and numerical agreement.

    NOTE(review): mangled — `feature_extractor`, `speech_inputs`,
    `np_speech_inputs` etc. are read but only ever assigned to
    `_lowerCAmelCase`, and `lowerCamelCase__` is unbound. Left byte-identical.
    """
    _lowerCAmelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
    # create three inputs of length 800, 1000, and 1200
    _lowerCAmelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
    _lowerCAmelCase : Tuple = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs]
    # Test feature size
    _lowerCAmelCase : List[Any] = feature_extractor(lowerCamelCase__ , padding='max_length' , return_tensors='np' ).input_features
    self.assertTrue(input_features.ndim == 3 )
    self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
    self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
    # Test not batched input
    _lowerCAmelCase : Optional[Any] = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features
    _lowerCAmelCase : Union[str, Any] = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features
    self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1E-3 ) )
    # Test batched
    _lowerCAmelCase : int = feature_extractor(lowerCamelCase__ , return_tensors='np' ).input_features
    _lowerCAmelCase : str = feature_extractor(lowerCamelCase__ , return_tensors='np' ).input_features
    for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
        self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1E-3 ) )
    # Test 2-D numpy arrays are batched.
    _lowerCAmelCase : List[Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
    _lowerCAmelCase : Dict = np.asarray(lowerCamelCase__ )
    _lowerCAmelCase : Optional[int] = feature_extractor(lowerCamelCase__ , return_tensors='np' ).input_features
    _lowerCAmelCase : str = feature_extractor(lowerCamelCase__ , return_tensors='np' ).input_features
    for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
        self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1E-3 ) )
    # Test truncation required
    _lowerCAmelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
    _lowerCAmelCase : Any = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs]
    _lowerCAmelCase : str = [x[: feature_extractor.n_samples] for x in speech_inputs]
    _lowerCAmelCase : int = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs_truncated]
    _lowerCAmelCase : List[str] = feature_extractor(lowerCamelCase__ , return_tensors='np' ).input_features
    _lowerCAmelCase : Optional[Any] = feature_extractor(lowerCamelCase__ , return_tensors='np' ).input_features
    for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
        self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1E-3 ) )
def a ( self ):
    """Check pad() downcasts float64 input features to float32 for both
    numpy and torch return tensors.

    NOTE(review): mangled — `feature_extractor`, `np_speech_inputs`,
    `py_speech_inputs`, `np_processed`, `pt_processed` are read but only
    assigned to `_lowerCAmelCase`. Left byte-identical.
    """
    import torch

    _lowerCAmelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
    _lowerCAmelCase : str = np.random.rand(100 , 32 ).astype(np.floataa )
    _lowerCAmelCase : List[Any] = np_speech_inputs.tolist()
    for inputs in [py_speech_inputs, np_speech_inputs]:
        _lowerCAmelCase : int = feature_extractor.pad([{'input_features': inputs}] , return_tensors='np' )
        self.assertTrue(np_processed.input_features.dtype == np.floataa )
        _lowerCAmelCase : List[Any] = feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt' )
        self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def a ( self , snake_case__ ):
    """Load the first N decoded audio arrays from the dummy LibriSpeech
    validation split, sorted by id for determinism.

    NOTE(review): mangled — the parameter (presumably `num_samples`) is read
    through the undefined `lowerCamelCase__`/`num_samples`, and the dataset
    is bound to `_lowerCAmelCase` but read as `ds`; confirm against upstream
    `_load_datasamples`.
    """
    _lowerCAmelCase : Union[str, Any] = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
    # automatic decoding with librispeech
    _lowerCAmelCase : Tuple = ds.sort('id' ).select(range(lowerCamelCase__ ) )[:num_samples]["audio"]
    return [x["array"] for x in speech_samples]
def a ( self ):
    """Integration test: run one LibriSpeech sample through a default
    WhisperFeatureExtractor and compare the first 30 values of the first
    mel-filterbank row against pinned reference values.

    NOTE(review): mangled locals — the expected tensor and the loaded sample
    are bound to `_lowerCAmelCase` but passed via the undefined
    `lowerCamelCase__`; the shape check (1, 80, 3000) matches Whisper's
    80-mel x 3000-frame log-mel features.
    """
    # fmt: off — pinned reference slice of the expected log-mel features.
    _lowerCAmelCase : Optional[int] = torch.tensor(
        [
            0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
            0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
            0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
            -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
        ] )
    # fmt: on
    _lowerCAmelCase : Union[str, Any] = self._load_datasamples(1 )
    _lowerCAmelCase : int = WhisperFeatureExtractor()
    _lowerCAmelCase : List[Any] = feature_extractor(lowerCamelCase__ , return_tensors='pt' ).input_features
    self.assertEqual(input_features.shape , (1, 80, 3000) )
    self.assertTrue(torch.allclose(input_features[0, 0, :30] , lowerCamelCase__ , atol=1E-4 ) )
def a ( self ):
    """Check `zero_mean_unit_var_norm`: after rescaling a real sample to
    [0, 65535] (to expose the normalization issue), the normalized output must
    have near-zero mean and near-unit variance.

    NOTE(review): mangled locals — the extractor/audio are bound to
    `_lowerCAmelCase` but read as `feat_extract`/`audio`, and the
    `attention_mask` argument is the undefined `lowerCamelCase__`
    (presumably None upstream); confirm against the original test.
    """
    _lowerCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
    _lowerCAmelCase : List[str] = self._load_datasamples(1 )[0]
    _lowerCAmelCase : Optional[int] = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5535 # Rescale to [0, 65535] to show issue
    _lowerCAmelCase : List[str] = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=lowerCamelCase__ )[0]
    self.assertTrue(np.all(np.mean(lowerCamelCase__ ) < 1E-3 ) )
    self.assertTrue(np.all(np.abs(np.var(lowerCamelCase__ ) - 1 ) < 1E-3 ) )
| 718 |
"""Numerical integration of f(x) = x**2 over [a, b] with the trapezoidal rule."""


def method_a(boundary, steps):
    """Approximate the integral of ``f`` over ``boundary`` = [a, b].

    Trapezoidal rule with ``steps`` sub-intervals of width h = (b - a) / steps:
        y = h/2 * (f(a) + f(b)) + h * sum(f(x_i) for interior points x_i)

    NOTE(review): the obfuscated original declared duplicate ``_A`` parameters
    (a SyntaxError) and shadowed every function under the name ``lowercase``;
    names restored from the call sites in this file.
    """
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    """Yield the interior sample points a+h, a+2h, ..., b-h.

    The loop bound uses a half-step tolerance: the previous strict
    ``x < b - h`` test silently dropped the last interior point once float
    accumulation pushed x to e.g. 0.9000000000000001.
    """
    x = a + h
    while x < b - h / 2.0:  # half-step tolerance keeps b-h despite rounding noise
        yield x
        x = x + h


def f(x):  # enter your function here
    """Integrand: f(x) = x**2."""
    y = (x - 0) * (x - 0)
    return y


def main():
    """Integrate f over [0, 1] with 10 steps and print the result."""
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary, steps)
    print(f'y = {y}')


if __name__ == "__main__":
    main()
| 630 | 0 |
"""Recursive 0/1 knapsack: maximize total value within a weight budget."""


def knapsack(weights, values, number_of_items, max_weight, index):
    """Return the best achievable value using items from ``index`` onward.

    Classic branch recursion: either skip item ``index`` or, if it fits the
    remaining ``max_weight``, take it and recurse with the reduced budget.
    Name restored from the recursive call site; the obfuscated original
    declared five duplicate ``_A`` parameters (a SyntaxError).

    >>> knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0)
    13
    >>> knapsack([3, 4, 5], [10, 9, 8], 3, 25, 0)
    27
    """
    # Base case: no items left to consider.
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    # Option 1: skip the current item.
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    # Option 2: take the current item if it fits the remaining budget.
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 719 |
"""Lazy import structure for the TrOCR model (standard transformers pattern)."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)


# Mapping of submodule name -> exported public names, consumed by _LazyModule.
# NOTE(review): the obfuscated original bound this dict (and the torch-only
# list below) to throwaway names, leaving `_import_structure` undefined at the
# _LazyModule call — restored here so the module actually imports.
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

# The modeling code is only exported when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports the
    # submodules on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 630 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger (upstream name: `logger`); the throwaway name is never
# read back elsewhere in this chunk.
lowerCAmelCase : Tuple = logging.get_logger(__name__)

# Checkpoint -> config-URL map (upstream: GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP).
lowerCAmelCase : Any = {
    'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}
class UpperCamelCase__ ( PretrainedConfig ):
    """Configuration for a GLPN (Global-Local Path Networks) depth-estimation model.

    NOTE(review): identifiers in this file are machine-mangled. The base class
    (previously the undefined `__lowercase`) is restored to `PretrainedConfig`
    (imported above), and the duplicate `snake_case__` parameters (a
    SyntaxError) are restored from the body's attribute assignments; defaults
    match `transformers.GLPNConfig`.
    """

    # Keep the mangled attribute for any in-repo references, and add the
    # canonical `model_type` that PretrainedConfig subclasses declare.
    __magic_name__ = '''glpn'''
    model_type = '''glpn'''

    def __init__(
        self,
        num_channels=3,                          # input image channels
        num_encoder_blocks=4,                    # number of encoder stages
        depths=[2, 2, 2, 2],                     # layers per stage (HF-style mutable defaults kept for compat)
        sr_ratios=[8, 4, 2, 1],                  # attention spatial-reduction ratios
        hidden_sizes=[32, 64, 160, 256],         # per-stage embedding dims
        patch_sizes=[7, 3, 3, 3],                # per-stage patch sizes
        strides=[4, 2, 2, 2],                    # per-stage strides
        num_attention_heads=[1, 2, 5, 8],        # per-stage attention heads
        mlp_ratios=[4, 4, 4, 4],                 # per-stage MLP expansion ratios
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1E-6,
        decoder_hidden_size=64,
        max_depth=10,                            # maximum predicted depth (meters)
        head_in_index=-1,                        # which feature map feeds the head
        **kwargs,
    ):
        """Store all hyperparameters; extra kwargs go to PretrainedConfig."""
        # Was `super().__init__(**__a)` with `__a` undefined — fixed.
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
| 720 |
'''simple docstring'''
from collections import Counter
from timeit import timeit
def lowercase (_A = "" , ):
"""simple docstring"""
return sum(c % 2 for c in Counter(input_str.replace(' ' , '' ).lower() ).values() ) < 2
def lowercase (_A = "" ):
"""simple docstring"""
if len(_A ) == 0:
return True
_lowerCAmelCase : Union[str, Any] = input_str.replace(' ' , '' ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
_lowerCAmelCase : dict[str, int] = {}
for character in lower_case_input_str:
_lowerCAmelCase : Union[str, Any] = character_freq_dict.get(_A , 0 ) + 1
_lowerCAmelCase : List[Any] = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def lowercase (_A = "" ):
"""simple docstring"""
print('\nFor string = ' , _A , ':' )
print(
'> can_string_be_rearranged_as_palindrome_counter()' , '\tans =' , can_string_be_rearranged_as_palindrome_counter(_A ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome_counter(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
print(
'> can_string_be_rearranged_as_palindrome()' , '\tans =' , can_string_be_rearranged_as_palindrome(_A ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
if __name__ == "__main__":
lowerCAmelCase : Tuple = input(
"""Enter string to determine if it can be rearranged as a palindrome or not: """
).strip()
benchmark(check_str)
lowerCAmelCase : Optional[Any] = can_string_be_rearranged_as_palindrome_counter(check_str)
print(F'''{check_str} can {"" if status else "not "}be rearranged as a palindrome''')
| 630 | 0 |
"""Lazy import structure for the DeiT model (standard transformers pattern)."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


# Mapping of submodule name -> exported public names, consumed by _LazyModule.
# NOTE(review): the obfuscated original bound this dict and every optional
# list to throwaway names, leaving `_import_structure` undefined at the
# _LazyModule call — restored here so the module actually imports.
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}

# Image-processing exports require the vision extras.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]

# PyTorch modeling exports.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deit"] = [
        "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DeiTForImageClassification",
        "DeiTForImageClassificationWithTeacher",
        "DeiTForMaskedImageModeling",
        "DeiTModel",
        "DeiTPreTrainedModel",
    ]

# TensorFlow modeling exports.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deit"] = [
        "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDeiTForImageClassification",
        "TFDeiTForImageClassificationWithTeacher",
        "TFDeiTForMaskedImageModeling",
        "TFDeiTModel",
        "TFDeiTPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_deit import DeiTFeatureExtractor
        from .image_processing_deit import DeiTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deit import (
            DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
            DeiTModel,
            DeiTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deit import (
            TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
            TFDeiTModel,
            TFDeiTPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports the
    # submodules on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 721 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger (upstream name: `logger`); not referenced again here.
lowerCAmelCase : str = logging.get_logger(__name__)

# Checkpoint -> config-URL map
# (upstream: DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP).
# NOTE(review): the URL path ("data2vec/...") does not match the checkpoint
# name ("facebook/data2vec-text-base") — looks like upstream data, but confirm.
lowerCAmelCase : int = {
    """facebook/data2vec-text-base""": """https://huggingface.co/data2vec/resolve/main/config.json""",
}
class UpperCamelCase__ ( PretrainedConfig ):
    """Configuration for the Data2Vec text model (BERT/RoBERTa-style encoder).

    NOTE(review): identifiers were machine-mangled — the `__init__` declared
    eighteen duplicate `snake_case__` parameters (a SyntaxError) and the base
    class was the undefined `SCREAMING_SNAKE_CASE_`. Parameter names are
    restored from the body's attribute assignments and match
    `transformers.Data2VecTextConfig`.
    """

    # Keep the mangled attribute for any in-repo references, and add the
    # canonical `model_type` that PretrainedConfig subclasses declare.
    __magic_name__ = "data2vec-text"
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        """Store all hyperparameters; token ids and extras go to PretrainedConfig."""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class UpperCamelCase__ ( OnnxConfig ):
    """ONNX export configuration for the Data2Vec text model.

    NOTE(review): the base class (previously the undefined
    `SCREAMING_SNAKE_CASE_`) is restored to `OnnxConfig` (imported above);
    upstream names this class `Data2VecTextOnnxConfig`. Defining it with the
    same mangled name as the config class above shadows that class at module
    scope — flagged here rather than renamed, to preserve in-file references.
    """

    @property
    def a ( self ):
        """Return the ONNX dynamic-axes mapping for the model inputs.

        Axis 1 is 'choice' only for multiple-choice tasks, where inputs are
        (batch, choice, sequence); otherwise inputs are (batch, sequence).
        NOTE(review): upstream names this property `inputs`; kept as `a` to
        preserve the mangled interface.
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
| 630 | 0 |
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__ ( SchedulerCommonTest ):
    """Unit tests for `PNDMScheduler`.

    NOTE(review): this class was machine-mangled — every method was named `a`
    (so each later def shadowed the previous one), the base class was the
    undefined `__a`, and most locals/arguments were the undefined `A__`.
    Method, attribute, and variable names are restored from the surviving
    internal call sites (`self.scheduler_classes`, `self.forward_default_kwargs`,
    `self.get_scheduler_config`, `self.full_loop`, ...) and from the upstream
    diffusers scheduler test suite; confirm against that file before trusting
    the restored names.
    """

    # Attributes read by SchedulerCommonTest helpers; names grounded by the
    # `self.scheduler_classes` / `self.forward_default_kwargs` reads below
    # (the original assigned both to `__magic_name__`, so one clobbered the other).
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 5_0),)

    def get_scheduler_config(self, **kwargs):
        """Default PNDM config; individual tests override fields via **kwargs."""
        config = {
            'num_train_timesteps': 1000,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        """Round-trip the scheduler through save_config/from_pretrained and
        verify step_prk/step_plms outputs are bit-for-bit preserved."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        """Covered by check_over_configs / check_over_forward; intentionally empty."""
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        """Like check_over_configs, but exercising the forward kwargs path."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        """Run the full PRK warmup + PLMS sampling loop and return the sample."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample
        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample
        return sample

    def test_step_shape(self):
        """step_prk/step_plms must preserve the sample shape at any timestep."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler, 'set_timesteps'):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, 'set_timesteps'):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]
            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps, torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ), )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        """Regression test: num_inference_steps == 27 used to error in PRK."""
        num_inference_steps = 27
        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        """step_plms without PRK warmup must raise.

        NOTE(review): the expected exception class was mangled to `A__`;
        ValueError matches the upstream diffusers test — confirm."""
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 198.1318) < 1E-2
        assert abs(result_mean.item() - 0.2580) < 1E-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type='v_prediction')
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 67.3986) < 1E-2
        assert abs(result_mean.item() - 0.0878) < 1E-3

    def test_full_loop_with_set_alpha_to_one(self):
        # NOTE(review): the boolean was mangled to `A__`; upstream pairs
        # set_alpha_to_one=True with these expected sums — confirm.
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 230.0399) < 1E-2
        assert abs(result_mean.item() - 0.2995) < 1E-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # NOTE(review): mangled boolean restored to False per upstream — confirm.
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 186.9482) < 1E-2
        assert abs(result_mean.item() - 0.2434) < 1E-3
| 700 |
'''simple docstring'''
import pytest
import datasets
# Import fixture modules as plugins.
# NOTE(review): pytest only honors this list when it is bound to the magic
# module-level name `pytest_plugins`; the obfuscated original bound it to a
# throwaway name, making it dead code.
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def pytest_collection_modifyitems(config, items):
    """Pytest hook: mark every collected test that carries neither the
    `integration` nor the `unit` marker as a unit test by default.

    NOTE(review): hook and parameter names restored — the mangled
    `def lowercase(_A, _A)` duplicated parameter names (a SyntaxError) and,
    not being named for the pytest hook, would never have been invoked.
    """
    for item in items:
        if any(marker in item.keywords for marker in ['integration', 'unit'] ):
            continue
        item.add_marker(pytest.mark.unit )
def pytest_configure(config):
    """Pytest hook: register the custom `torchaudio_latest` marker so pytest
    does not warn about an unknown marker. (Hook name restored from the
    mangled `lowercase`; pytest discovers this hook by name.)"""
    config.addinivalue_line('markers' , 'torchaudio_latest: mark test to run with torchaudio>=0.12' )
@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    """Autouse fixture: redirect every `datasets` cache path to a per-session
    temporary directory so tests never read or pollute the real user cache.

    NOTE(review): fixture/parameter names and `autouse=True` restored — the
    mangled source passed the undefined `_A` to the decorator and duplicated
    `_A` as both parameter names (a SyntaxError).
    """
    test_hf_cache_home = tmp_path_factory.getbasetemp() / 'cache'
    test_hf_datasets_cache = test_hf_cache_home / 'datasets'
    test_hf_metrics_cache = test_hf_cache_home / 'metrics'
    test_hf_modules_cache = test_hf_cache_home / 'modules'
    monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE' , str(test_hf_datasets_cache ) )
    monkeypatch.setattr('datasets.config.HF_METRICS_CACHE' , str(test_hf_metrics_cache ) )
    monkeypatch.setattr('datasets.config.HF_MODULES_CACHE' , str(test_hf_modules_cache ) )
    test_downloaded_datasets_path = test_hf_datasets_cache / 'downloads'
    monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH' , str(test_downloaded_datasets_path ) )
    test_extracted_datasets_path = test_hf_datasets_cache / 'downloads' / 'extracted'
    monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(test_extracted_datasets_path ) )
@pytest.fixture(autouse=True, scope='session' )
def disable_tqdm_output():
    """Session-wide autouse fixture: silence `datasets` progress bars during
    the whole test run.

    NOTE(review): `autouse=True` restored from the mangled, undefined `_A`.
    """
    datasets.disable_progress_bar()
@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    """Autouse fixture: stop tests from pinging the Hub download counter.

    NOTE(review): the patched value was mangled to the undefined `_A`;
    upstream sets HF_UPDATE_DOWNLOAD_COUNTS to False — confirm.
    """
    monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS' , False )
@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    """Opt-in fixture: silence SQLAlchemy 2.0's deprecation "uber warning".

    NOTE(review): the patched value was mangled to the undefined `_A`;
    upstream sets SILENCE_UBER_WARNING to True — confirm.
    """
    monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING' , True )
| 630 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE_ )
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = field(default="image-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
__magic_name__ = Features({"image": Image()} )
__magic_name__ = Features({"labels": ClassLabel} )
__magic_name__ = "image"
__magic_name__ = "labels"
def a ( self , snake_case__ ):
'''simple docstring'''
if self.label_column not in features:
raise ValueError(F'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column] , snake_case__ ):
raise ValueError(F'Column {self.label_column} is not a ClassLabel.' )
_lowerCAmelCase : Tuple = copy.deepcopy(self )
_lowerCAmelCase : Dict = self.label_schema.copy()
_lowerCAmelCase : Union[str, Any] = features[self.label_column]
_lowerCAmelCase : List[Any] = label_schema
return task_template
@property
def a ( self ):
'''simple docstring'''
return {
self.image_column: "image",
self.label_column: "labels",
}
| 701 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
# NOTE(review): these are the standard transformers docstring constants
# (_CONFIG_FOR_DOC, _CHECKPOINT_FOR_DOC, _EXPECTED_OUTPUT_SHAPE, ...) with
# their names mangled to throwaway bindings; they are not read back in this
# chunk. Expected base-model output shape: (batch=1, channels=1088, 7, 7).
lowerCAmelCase : str = logging.get_logger(__name__)

# General docstring
lowerCAmelCase : Optional[Any] = """RegNetConfig"""

# Base docstring
lowerCAmelCase : int = """facebook/regnet-y-040"""
lowerCAmelCase : Optional[Any] = [1, 10_88, 7, 7]

# Image classification docstring
lowerCAmelCase : Any = """facebook/regnet-y-040"""
lowerCAmelCase : Optional[Any] = """tabby, tabby cat"""

# Canonical checkpoint list (upstream: TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST).
lowerCAmelCase : Tuple = [
    """facebook/regnet-y-040""",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    """Conv -> BatchNorm -> activation block with explicit SAME-style padding.

    NOTE(review): class, method, and parameter names restored — the mangled
    source named every class `UpperCamelCase__` (so each def shadowed the
    previous), duplicated `snake_case__` parameters (a SyntaxError), used the
    nonexistent Keras attributes `ZeroPaddingaD`/`ConvaD`, and named the
    forward method `a` (Keras requires `call`). The class name is grounded by
    the `TFRegNetConvLayer(...)` call sites later in this file. The file-level
    import name `ACTaFN` (mangled `ACT2FN`) is kept as imported.
    """

    def __init__(self, out_channels, kernel_size=3, stride=1, groups=1, activation="relu", **kwargs):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        # NOTE(review): `use_bias` was mangled; upstream passes False (BN follows) — confirm.
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=kernel_size, strides=stride, padding='VALID', groups=groups, use_bias=False, name='convolution', )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1E-5, momentum=0.9, name='normalization')
        self.activation = ACTaFN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        """Apply pad -> conv -> batch-norm -> activation."""
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    """RegNet stem: validates the channel count, converts NCHW -> NHWC, and
    applies the initial strided conv embedding.

    NOTE(review): class/parameter names restored from body usage and in-file
    call sites; the forward method is renamed from the mangled `a` to `call`
    as Keras requires.
    """

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act, name='embedder', )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
    """1x1 strided conv + BatchNorm used to project residual shortcuts to the
    right shape/stride.

    NOTE(review): class name restored from the `TFRegNetShortCut(...)` call
    sites in the X/Y layers below; `ConvaD` -> `Conv2D`; forward renamed from
    `a` to `call`. `use_bias` was mangled; upstream passes False — confirm.
    """

    def __init__(self, out_channels, stride=2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name='convolution')
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1E-5, momentum=0.9, name='normalization')

    def call(self, inputs, training=False):
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    """Squeeze-and-Excitation block: global average pool, two 1x1 convs
    (relu then sigmoid), then channel-wise rescaling of the input.

    NOTE(review): class name restored from the `TFRegNetSELayer(...)` call
    site in the Y layer; `GlobalAveragePoolingaD`/`ConvaD` -> `...2D`;
    `keepdims` was mangled — upstream passes True (the pooled map must stay
    4-D to broadcast against the input) — confirm.
    """

    def __init__(self, in_channels, reduced_channels, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name='pooler')
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation='relu', name='attention.0'),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation='sigmoid', name='attention.2'),
        ]

    def call(self, hidden_state):
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        # Rescale each channel of the input by its learned attention weight.
        hidden_state = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
    """RegNet X layer: 1x1 -> grouped 3x3 (strided) -> 1x1 convs with a
    residual shortcut, like a ResNet bottleneck with reduction 1.

    NOTE(review): class/parameter names restored from body usage and the
    call site in TFRegNetStage; forward renamed from `a` to `call`. The last
    conv's activation was mangled — upstream passes None (activation is
    applied after the residual add) — confirm.
    """

    def __init__(self, config, in_channels, out_channels, stride=1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name='shortcut')
            if should_apply_shortcut
            else tf.keras.layers.Activation('linear', name='shortcut')
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name='layer.0'),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name='layer.1'),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name='layer.2'),
        ]
        self.activation = ACTaFN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
    """RegNet Y layer: an X layer with a Squeeze-and-Excitation block inserted
    before the final 1x1 conv.

    NOTE(review): class/parameter names restored from body usage and the call
    site in TFRegNetStage; forward renamed from `a` to `call`. The last
    conv's activation was mangled — upstream passes None — confirm.
    """

    def __init__(self, config, in_channels, out_channels, stride=1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name='shortcut')
            if should_apply_shortcut
            else tf.keras.layers.Activation('linear', name='shortcut')
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name='layer.0'),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name='layer.1'),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4 ) ), name='layer.2'),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name='layer.3'),
        ]
        self.activation = ACTaFN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class UpperCamelCase__ ( tf.keras.layers.Layer ):
    """A RegNet stage: `depth` consecutive X/Y residual blocks; only the first
    block changes channels/resolution (downsampling with `stride`).

    NOTE(review): restored the duplicated `snake_case__` parameter names
    (SyntaxError in the original) and the `self.layers` attribute binding.
    """

    def __init__( self , config , in_channels , out_channels , stride = 2 , depth = 2 , **kwargs ):
        super().__init__(**kwargs )
        layer = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config , in_channels , out_channels , stride=stride , name='layers.0' ),
            *[layer(config , out_channels , out_channels , name=F'layers.{i+1}' ) for i in range(depth - 1 )],
        ]

    def call( self , hidden_state ):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state )
        return hidden_state
class UpperCamelCase__ ( tf.keras.layers.Layer ):
    """Stacks the RegNet stages and optionally collects every stage's hidden state.

    NOTE(review): the original's forward pass read `hidden_state` before it was
    ever assigned (the input parameter had been renamed away); restored.
    """

    def __init__( self , config , **kwargs ):
        super().__init__(**kwargs )
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) )
        in_out_channels = zip(config.hidden_sizes , config.hidden_sizes[1:] )
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels , config.depths[1:] ) ):
            self.stages.append(TFRegNetStage(config , in_channels , out_channels , depth=depth , name=F'stages.{i+1}' ) )

    def call( self , hidden_state , output_hidden_states = False , return_dict = True ):
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                # Record each stage's *input*; the final output is appended below.
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state )
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None )
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state , hidden_states=hidden_states )
@keras_serializable
class UpperCamelCase__ ( tf.keras.layers.Layer ):
    """Embedder + encoder + global-average pooler; outputs are transposed back to
    NCHW so the TF model matches the PyTorch reference layout.

    NOTE(review): `config_class` restored (was obfuscated to `__magic_name__`,
    which `keras_serializable` cannot read) and `GlobalAveragePoolingaD`
    corrected to the real `GlobalAveragePooling2D`.
    """

    config_class = RegNetConfig

    def __init__( self , config , **kwargs ):
        super().__init__(**kwargs )
        self.config = config
        self.embedder = TFRegNetEmbeddings(config , name='embedder' )
        self.encoder = TFRegNetEncoder(config , name='encoder' )
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True , name='pooler' )

    @unpack_inputs
    def call( self , pixel_values , output_hidden_states = None , return_dict = None , training = False , ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values , training=training )
        encoder_outputs = self.encoder(
            embedding_output , output_hidden_states=output_hidden_states , return_dict=return_dict , training=training )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state )
        # Change to NCHW output format to have uniformity in the modules
        last_hidden_state = tf.transpose(last_hidden_state , perm=(0, 3, 1, 2) )
        pooled_output = tf.transpose(pooled_output , perm=(0, 3, 1, 2) )
        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state , pooler_output=pooled_output , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """Base class wiring RegNet checkpoints to config and weight loading.

    NOTE(review): the original bound all three class attributes to the same
    obfuscated name `__magic_name__` (each overwriting the previous) and used
    the nonexistent dtype `tf.floataa`; restored to the attribute names the
    Transformers base class actually reads, and `tf.float32`.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature( self ):
        # Dummy serving signature: batch of NCHW images at the canonical 224x224 size.
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.float32 )}
lowerCAmelCase : List[Any] = r"""
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
lowerCAmelCase : Dict = r"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConveNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top." , SCREAMING_SNAKE_CASE_ , )
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """TF RegNet backbone model with no task head.

    NOTE(review): the original passed the undefined name `snake_case__` to
    `add_start_docstrings_to_model_forward`; the inputs docstring defined above
    this class (obfuscated name `lowerCAmelCase`) is used instead.
    """

    def __init__( self , config , *inputs , **kwargs ):
        super().__init__(config , *inputs , **kwargs )
        self.regnet = TFRegNetMainLayer(config , name='regnet' )

    @unpack_inputs
    @add_start_docstrings_to_model_forward(lowerCAmelCase )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=TFBaseModelOutputWithPoolingAndNoAttention , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def call( self , pixel_values , output_hidden_states = None , return_dict = None , training = False , ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values=pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict , training=training , )
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
    "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , SCREAMING_SNAKE_CASE_ , )
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
    """RegNet with a linear image-classification head on the pooled features.

    NOTE(review): duplicate `snake_case__` parameter names (a SyntaxError) and
    lost `self.` attribute bindings in the original are restored below.
    """

    def __init__( self , config , *inputs , **kwargs ):
        super().__init__(config , *inputs , **kwargs )
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config , name='regnet' )
        # classification head: flatten pooled NCHW features, then project to label space
        # (identity when the model is configured without labels).
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(lowerCAmelCase )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=TFSequenceClassifierOutput , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def call( self , pixel_values = None , labels = None , output_hidden_states = None , return_dict = None , training = False , ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict , training=training )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        flattened_output = self.classifier[0](pooled_output )
        logits = self.classifier[1](flattened_output )
        loss = None if labels is None else self.hf_compute_loss(labels=labels , logits=logits )
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=loss , logits=logits , hidden_states=outputs.hidden_states )
| 630 | 0 |
'''Convert an LXMERT TensorFlow checkpoint to a PyTorch checkpoint.'''


import argparse

import torch

from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging


logging.set_verbosity_info()


def lowercase (tf_checkpoint_path , config_file , pytorch_dump_path ):
    """Build an LxmertForPreTraining from `config_file`, load the TF weights,
    and save the resulting state dict to `pytorch_dump_path`.

    NOTE(review): the original declared all three parameters as `_A`
    (a SyntaxError); names restored from the argparse flags below.
    """
    config = LxmertConfig.from_json_file(config_file )
    print(f'Building PyTorch model from configuration: {config}' )
    model = LxmertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    torch.save(model.state_dict() , pytorch_dump_path )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    # NOTE(review): the original called `convert_tf_checkpoint_to_pytorch`,
    # which is not defined anywhere in this file; call the function above.
    lowercase(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 702 |
'''Compute the statistical mode(s) of a list.'''

from typing import Any, List


def lowercase (input_list: List[Any] ) -> List[Any]:
    """Return the sorted mode(s) of ``input_list`` (empty list for empty input).

    NOTE(review): the original took a parameter `_A` but read `input_list`,
    counted the whole list instead of each value, and compared against an
    undefined name `y` — all restored below.

    >>> lowercase([2, 2, 1, 3])
    [2]
    >>> lowercase([1, 2])
    [1, 2]
    >>> lowercase([])
    []
    """
    if not input_list:
        return []
    # Occurrence count of each element, positionally aligned with input_list.
    counts = [input_list.count(value ) for value in input_list]
    max_count = max(counts )  # the highest occurrence count in the input list
    # A set comprehension de-duplicates elements that share the maximum count.
    return sorted({input_list[i] for i, count in enumerate(counts ) if count == max_count} )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 630 | 0 |
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    """Simulated hanging connections must raise, and an explicit timeout must trip."""
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        # NOTE(review): the original referenced the undefined name
        # `lowerCAmelCase_`; this simulation mode raises
        # RequestWouldHangIndefinitelyError (imported at the top of the file).
        with pytest.raises(RequestWouldHangIndefinitelyError ):
            requests.request('GET' , 'https://huggingface.co' )
        with pytest.raises(requests.exceptions.ConnectTimeout ):
            requests.request('GET' , 'https://huggingface.co' , timeout=1.0 )


@pytest.mark.integration
def test_offline_with_connection_error():
    """Simulated connection failures must surface as a requests ConnectionError."""
    with offline(OfflineSimulationMode.CONNECTION_FAILS ):
        with pytest.raises(requests.exceptions.ConnectionError ):
            requests.request('GET' , 'https://huggingface.co' )


def test_offline_with_datasets_offline_mode_enabled():
    """With HF_DATASETS_OFFLINE=1, any HTTP call must fail fast with ConnectionError."""
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError ):
            http_head('https://huggingface.co' )
| 703 |
'''simple docstring'''
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 630 | 0 |
'''simple docstring'''
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples (tok , src_examples , tgt_examples , max_tokens=1_0_2_4 ):
    """Greedily concatenate consecutive (src, tgt) pairs so each packed source
    and target stays within ``max_tokens`` tokenizer tokens.

    Returns a pair ``(packed_sources, packed_targets)`` of equal length.
    NOTE(review): the original had duplicate `_A` parameters (a SyntaxError)
    and collapsed every tuple unpacking into a single name; the function is
    renamed to match its call site (`pack_examples`). The tqdm progress bar
    was dropped to keep the function dependency-free.
    """
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples , tgt_examples ) )
    if not sorted_examples:  # nothing to pack
        return finished_src, finished_tgt
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang ):
        # token length of `strang` according to the provided tokenizer
        return tok(strang , return_tensors='pt' ).input_ids.shape[1] > max_tokens

    for src, tgt in sorted_examples[1:]:
        cand_src = new_src + ' ' + src
        cand_tgt = new_tgt + ' ' + tgt
        if is_too_big(cand_src ) or is_too_big(cand_tgt ):  # cant fit, finalize example
            finished_src.append(new_src )
            finished_tgt.append(new_tgt )
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt
    # cleanup: flush the last partially packed example
    if new_src:
        assert new_tgt
        finished_src.append(new_src )
        finished_tgt.append(new_tgt )
    return finished_src, finished_tgt
def pack_data_dir (tok , data_dir , max_tokens , save_path ):
    """Pack the train split of ``data_dir`` with ``pack_examples`` and copy the
    val/test splits through unchanged into ``save_path``.

    NOTE(review): the original had duplicate `_A` parameters and collapsed all
    tuple unpackings into single names; the function is renamed to match its
    call site (`pack_data_dir`).
    """
    save_path = Path(save_path )
    save_path.mkdir(exist_ok=True )
    for split in ["train"]:
        src_path, tgt_path = data_dir / f'{split}.source', data_dir / f'{split}.target'
        src_docs = [x.rstrip() for x in Path(src_path ).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path ).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok , src_docs , tgt_docs , max_tokens )
        print(f'packed {split} split from {len(src_docs )} examples -> {len(packed_src )}.' )
        Path(save_path / f'{split}.source' ).open('w' ).write('\n'.join(packed_src ) )
        Path(save_path / f'{split}.target' ).open('w' ).write('\n'.join(packed_tgt ) )
    # val/test are left unpacked: copy straight through
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f'{split}.source', data_dir / f'{split}.target'
        shutil.copyfile(src_path , save_path / f'{split}.source' )
        shutil.copyfile(tgt_path , save_path / f'{split}.target' )
def packer_cli ():
    """CLI entry point: parse args, load the tokenizer, and pack ``--data_dir``
    into ``--save_path``.

    NOTE(review): restored the function name (`__main__` calls `packer_cli`)
    and the argparse `type=` values, which were obfuscated to
    `SCREAMING_SNAKE_CASE__`.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--tok_name' , type=str , help='like facebook/bart-large-cnn,t5-base, etc.' )
    parser.add_argument('--max_seq_len' , type=int , default=1_2_8 )
    parser.add_argument('--data_dir' , type=str )
    parser.add_argument('--save_path' , type=str )
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name )
    return pack_data_dir(tokenizer , Path(args.data_dir ) , args.max_seq_len , args.save_path )


if __name__ == "__main__":
    packer_cli()
| 704 |
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCAmelCase : int = logging.get_logger(__name__)
# Name of the single vocabulary artifact shipped with each GPT-SW3 checkpoint.
# NOTE(review): all three constants below are bound to the same obfuscated
# name `lowerCAmelCase`, so each assignment overwrites the previous one, yet
# the tokenizer class references them via distinct uppercase names
# (VOCAB_FILES_NAMES etc.) — obfuscation damage; verify against upstream.
lowerCAmelCase : Union[str, Any] = {"""vocab_file""": """spiece.model"""}
# Per-checkpoint download locations for the SentencePiece model file.
lowerCAmelCase : Optional[int] = {
    """vocab_file""": {
        """AI-Sweden/gpt-sw3-126m""": """https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model""",
        """AI-Sweden/gpt-sw3-350m""": """https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model""",
        """AI-Sweden/gpt-sw3-1.6b""": """https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model""",
        """AI-Sweden/gpt-sw3-6.7b""": """https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model""",
        """AI-Sweden/gpt-sw3-20b""": """https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model""",
    }
}
# Maximum input length (in tokens) each checkpoint supports.
lowerCAmelCase : Union[str, Any] = {
    """AI-Sweden/gpt-sw3-126m""": 20_48,
    """AI-Sweden/gpt-sw3-350m""": 20_48,
    """AI-Sweden/gpt-sw3-1.6b""": 20_48,
    """AI-Sweden/gpt-sw3-6.7b""": 20_48,
    """AI-Sweden/gpt-sw3-20b""": 20_48,
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """SentencePiece tokenizer for the GPT-SW3 model family.

    NOTE(review): the original block was obfuscation-damaged — every __init__
    parameter was named `snake_case__` (a SyntaxError), all methods were named
    `a` (so only the last definition survived), and the four class attributes
    were all bound to `__magic_name__`. Names below are restored to the
    identifiers the `PreTrainedTokenizer` base class actually uses.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__( self , vocab_file , do_lower_case=False , remove_space=False , keep_accents=False , pad_token=None , unk_token=None , eos_token=None , bos_token=None , sp_model_kwargs = None , **kwargs , ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        name_or_path = kwargs.get('name_or_path' )
        if name_or_path is None:
            logger.warning(
                'name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'
                ' you are testing the model, this can safely be ignored' )
            name_or_path = 'None'
        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = '<|endoftext|>' if eos_token is None else eos_token
        unk_token = '<unk>' if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = '<pad>' if pad_token is None else pad_token
            bos_token = '<s>' if bos_token is None else bos_token
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
        # Used for whitespace normalization in input texts
        # fmt : off
        # NOTE(review): these entries look mojibake-damaged (they should be a
        # set of distinct unicode space characters); verify against upstream.
        self.whitespaces = {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '', ''}
        # fmt : on
        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            F'[{"".join(map(chr , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]' )

    def __getstate__( self ):
        # Drop the unpicklable SentencePiece processor; it is rebuilt in __setstate__.
        # NOTE(review): the original assigned None to a throwaway local and
        # never nulled state['sp_model'], so pickling would fail.
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size( self ):
        """Size of the SentencePiece vocabulary (excluding added tokens)."""
        return len(self.sp_model )

    def preprocess_text( self , text ):
        """Strip non-printing characters, normalize whitespace, and NFC-normalize."""
        text = self.non_printing_characters_re.sub('' , text )
        # Normalize whitespaces
        text = ''.join([char if char not in self.whitespaces else ' ' for char in text] )
        # NFC Unicode normalization
        return unicodedata.normalize('NFC' , text )

    def _tokenize( self , text , **kwargs ):
        text = self.preprocess_text(text )
        return self.sp_model.encode(text , out_type=str )

    def _convert_token_to_id( self , token ):
        """Token (str) -> vocabulary id (int)."""
        return self.sp_model.PieceToId(token )

    def _convert_id_to_token( self , index ):
        """Vocabulary id (int) -> token (str)."""
        return self.sp_model.IdToPiece(index )

    @staticmethod
    def clean_up_tokenization( out_string ):
        """Returns the input string; no extra clean-up is performed."""
        return out_string

    def convert_tokens_to_string( self , tokens ):
        """Join sub-tokens back into text, decoding runs of regular pieces with
        SentencePiece and splicing special tokens through verbatim."""
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += ' '
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string

    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def save_vocabulary( self , save_directory , filename_prefix = None ):
        """Copy (or re-serialize) the SentencePiece model into `save_directory`."""
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , 'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)

    def encode_fast( self , text , return_tensors = False ):
        """Encode text (or a list of texts) directly through SentencePiece,
        skipping added-token bookkeeping; optionally return a torch tensor."""
        if isinstance(text , str ):
            text = self.preprocess_text(text )
            token_ids = self.sp_model.encode(text )
        else:
            text = [self.preprocess_text(t ) for t in text]
            token_ids = self.sp_model.encode(text )
        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids )
        return token_ids

    def decode_fast( self , token_ids ):
        """Decode ids straight through SentencePiece (skips added-token handling)."""
        return self.sp_model.decode(token_ids )

    def _build_conversation_input_ids( self , conversation ):
        """Serialize a `Conversation` into the User:/Bot: prompt format and encode it."""
        all_responses = [F'User: {text}' if is_user else F'Bot: {text}' for is_user, text in conversation.iter_texts()]
        prompt = (
            F'{self.eos_token}{self.bos_token}' + F'{self.bos_token}'.join(all_responses ) + F'{self.bos_token}Bot:'
        )
        return self.encode(text=prompt )
| 630 | 0 |
import os
from typing import Dict, List, Tuple, TypeVar, Union

# Generic element type for the aliases below.
# NOTE(review): the aliases reference `T`, but the TypeVar was bound to the
# obfuscated name `lowerCAmelCase` (and each later assignment overwrites the
# previous alias) — obfuscation damage; verify against the upstream
# `datasets` typing module where these are T / ListLike / NestedDataStructureLike / PathLike.
lowerCAmelCase : List[str] = TypeVar("""T""")
# A homogeneous sequence: either a list or a tuple of T.
lowerCAmelCase : int = Union[List[T], Tuple[T, ...]]
# One value, a list of values, or a str-keyed mapping of values.
lowerCAmelCase : str = Union[T, List[T], Dict[str, T]]
# Anything usable as a filesystem path.
lowerCAmelCase : List[str] = Union[str, bytes, os.PathLike]
| 705 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = (DDPMScheduler,)
def a ( self , **snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = {
'num_train_timesteps': 1000,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'variance_type': 'fixed_small',
'clip_sample': True,
}
config.update(**snake_case__ )
return config
def a ( self ):
'''simple docstring'''
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=snake_case__ )
def a ( self ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=snake_case__ , beta_end=snake_case__ )
def a ( self ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=snake_case__ )
def a ( self ):
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=snake_case__ )
def a ( self ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=snake_case__ )
def a ( self ):
'''simple docstring'''
self.check_over_configs(thresholding=snake_case__ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=snake_case__ , prediction_type=snake_case__ , sample_max_value=snake_case__ , )
def a ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case__ )
def a ( self ):
'''simple docstring'''
for t in [0, 500, 999]:
self.check_over_forward(time_step=snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.scheduler_classes[0]
_lowerCAmelCase : Optional[Any] = self.get_scheduler_config()
_lowerCAmelCase : List[Any] = scheduler_class(**snake_case__ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.scheduler_classes[0]
_lowerCAmelCase : Optional[Any] = self.get_scheduler_config()
_lowerCAmelCase : List[Any] = scheduler_class(**snake_case__ )
_lowerCAmelCase : Optional[int] = len(snake_case__ )
_lowerCAmelCase : str = self.dummy_model()
_lowerCAmelCase : str = self.dummy_sample_deter
_lowerCAmelCase : Union[str, Any] = torch.manual_seed(0 )
for t in reversed(range(snake_case__ ) ):
# 1. predict noise residual
_lowerCAmelCase : List[Any] = model(snake_case__ , snake_case__ )
# 2. predict previous mean of sample x_t-1
_lowerCAmelCase : Any = scheduler.step(snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_lowerCAmelCase : Dict = pred_prev_sample
_lowerCAmelCase : Dict = torch.sum(torch.abs(snake_case__ ) )
_lowerCAmelCase : List[str] = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_sum.item() - 258.9606 ) < 1E-2
assert abs(result_mean.item() - 0.3372 ) < 1E-3
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.scheduler_classes[0]
_lowerCAmelCase : Any = self.get_scheduler_config(prediction_type='v_prediction' )
_lowerCAmelCase : Union[str, Any] = scheduler_class(**snake_case__ )
_lowerCAmelCase : Optional[int] = len(snake_case__ )
_lowerCAmelCase : Any = self.dummy_model()
_lowerCAmelCase : Tuple = self.dummy_sample_deter
_lowerCAmelCase : Optional[int] = torch.manual_seed(0 )
for t in reversed(range(snake_case__ ) ):
# 1. predict noise residual
_lowerCAmelCase : Union[str, Any] = model(snake_case__ , snake_case__ )
# 2. predict previous mean of sample x_t-1
_lowerCAmelCase : Dict = scheduler.step(snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_lowerCAmelCase : Tuple = pred_prev_sample
_lowerCAmelCase : Any = torch.sum(torch.abs(snake_case__ ) )
_lowerCAmelCase : Dict = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_sum.item() - 202.0296 ) < 1E-2
assert abs(result_mean.item() - 0.2631 ) < 1E-3
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.scheduler_classes[0]
_lowerCAmelCase : Optional[int] = self.get_scheduler_config()
_lowerCAmelCase : Union[str, Any] = scheduler_class(**snake_case__ )
_lowerCAmelCase : Union[str, Any] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=snake_case__ )
_lowerCAmelCase : Union[str, Any] = scheduler.timesteps
for i, timestep in enumerate(snake_case__ ):
if i == len(snake_case__ ) - 1:
_lowerCAmelCase : str = -1
else:
_lowerCAmelCase : Optional[Any] = timesteps[i + 1]
_lowerCAmelCase : int = scheduler.previous_timestep(snake_case__ )
_lowerCAmelCase : int = prev_t.item()
self.assertEqual(snake_case__ , snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.scheduler_classes[0]
_lowerCAmelCase : Tuple = self.get_scheduler_config()
_lowerCAmelCase : List[Any] = scheduler_class(**snake_case__ )
_lowerCAmelCase : Optional[int] = [100, 87, 50, 51, 0]
with self.assertRaises(snake_case__ , msg='`custom_timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.scheduler_classes[0]
_lowerCAmelCase : List[str] = self.get_scheduler_config()
_lowerCAmelCase : Union[str, Any] = scheduler_class(**snake_case__ )
_lowerCAmelCase : Optional[int] = [100, 87, 50, 1, 0]
_lowerCAmelCase : int = len(snake_case__ )
with self.assertRaises(snake_case__ , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=snake_case__ , timesteps=snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.scheduler_classes[0]
_lowerCAmelCase : int = self.get_scheduler_config()
_lowerCAmelCase : Any = scheduler_class(**snake_case__ )
_lowerCAmelCase : Any = [scheduler.config.num_train_timesteps]
with self.assertRaises(
snake_case__ , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ):
scheduler.set_timesteps(timesteps=snake_case__ )
| 630 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase : str = logging.get_logger(__name__)
lowerCAmelCase : List[Any] = """▁"""
lowerCAmelCase : Optional[Any] = {"""vocab_file""": """sentencepiece.bpe.model"""}
lowerCAmelCase : Any = {
"""vocab_file""": {
"""facebook/mbart-large-50-one-to-many-mmt""": (
"""https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"""
),
}
}
lowerCAmelCase : int = {
"""facebook/mbart-large-50-one-to-many-mmt""": 10_24,
}
# fmt: off
lowerCAmelCase : int = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN""", """af_ZA""", """az_AZ""", """bn_IN""", """fa_IR""", """he_IL""", """hr_HR""", """id_ID""", """ka_GE""", """km_KH""", """mk_MK""", """ml_IN""", """mn_MN""", """mr_IN""", """pl_PL""", """ps_AF""", """pt_XX""", """sv_SE""", """sw_KE""", """ta_IN""", """te_IN""", """th_TH""", """tl_XX""", """uk_UA""", """ur_PK""", """xh_ZA""", """gl_ES""", """sl_SI"""]
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """Sentencepiece tokenizer in the mBART-50 style, with fairseq language-code
    special tokens and src/tgt language handling.
    NOTE(review): this file is machine-obfuscated — every local is named
    `_lowerCAmelCase` and every positional argument `snake_case__`, so successive
    assignments shadow one another and several reads below (e.g. `self.sp_model`,
    `mask_token`, `tokens`) are never actually bound. Verify against the upstream
    implementation before relying on runtime behavior.
    """
    __magic_name__ = VOCAB_FILES_NAMES
    __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __magic_name__ = PRETRAINED_VOCAB_FILES_MAP
    __magic_name__ = ["input_ids", "attention_mask"]
    __magic_name__ = []
    __magic_name__ = []
    def __init__( self , snake_case__ , snake_case__=None , snake_case__=None , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__ = None , **snake_case__ , ):
        '''Load the sentencepiece model, align ids with the fairseq vocab, and register language codes.'''
        _lowerCAmelCase : str = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token
        _lowerCAmelCase : int = {} if sp_model_kwargs is None else sp_model_kwargs
        _lowerCAmelCase : Dict = kwargs.get('additional_special_tokens' , [] )
        # Every fairseq language code must be present among the special tokens.
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=snake_case__ , tgt_lang=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , )
        _lowerCAmelCase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(snake_case__ ) )
        _lowerCAmelCase : Optional[int] = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 token
        _lowerCAmelCase : Optional[int] = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        _lowerCAmelCase : Union[str, Any] = 1
        _lowerCAmelCase : Any = len(self.sp_model )
        _lowerCAmelCase : int = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(snake_case__ )
        }
        _lowerCAmelCase : str = {v: k for k, v in self.lang_code_to_id.items()}
        _lowerCAmelCase : Union[str, Any] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
        _lowerCAmelCase : Tuple = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        # Default to English as the source language when none is given.
        _lowerCAmelCase : Any = src_lang if src_lang is not None else 'en_XX'
        _lowerCAmelCase : Optional[Any] = self.lang_code_to_id[self._src_lang]
        _lowerCAmelCase : Optional[int] = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
    @property
    def a ( self ):
        '''Vocabulary size: spm pieces + language codes + fairseq offset + 1 for the mask token.'''
        return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1  # Plus 1 for the mask token
    @property
    def a ( self ):
        '''Current source language code (e.g. "en_XX").'''
        return self._src_lang
    @src_lang.setter
    def a ( self , snake_case__ ):
        '''Set the source language and refresh the prefix/suffix special tokens.'''
        _lowerCAmelCase : Tuple = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def __getstate__( self ):
        '''Drop the unpicklable sentencepiece processor before pickling.'''
        _lowerCAmelCase : List[Any] = self.__dict__.copy()
        _lowerCAmelCase : int = None
        return state
    def __setstate__( self , snake_case__ ):
        '''Restore state and reload the sentencepiece model from disk.'''
        _lowerCAmelCase : Union[str, Any] = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            _lowerCAmelCase : Optional[int] = {}
        _lowerCAmelCase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def a ( self ):
        '''Return the token->id vocabulary, including added tokens.'''
        # NOTE(review): `snake_case__` here is presumably the loop variable `i` — verify.
        _lowerCAmelCase : List[str] = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def a ( self , snake_case__ ):
        '''Tokenize text into sentencepiece sub-tokens.'''
        return self.sp_model.encode(snake_case__ , out_type=snake_case__ )
    def a ( self , snake_case__ ):
        '''Convert a token (str) to an id, honoring the fairseq offset.'''
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        _lowerCAmelCase : Union[str, Any] = self.sp_model.PieceToId(snake_case__ )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def a ( self , snake_case__ ):
        '''Convert an id to its token (str), honoring the fairseq offset.'''
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def a ( self , snake_case__ ):
        '''Join sub-tokens back into a string, decoding special tokens verbatim.'''
        _lowerCAmelCase : List[str] = []
        _lowerCAmelCase : Dict = ''
        _lowerCAmelCase : Optional[int] = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(snake_case__ ) + token
                _lowerCAmelCase : Any = True
                _lowerCAmelCase : Any = []
            else:
                current_sub_tokens.append(snake_case__ )
                _lowerCAmelCase : Any = False
        out_string += self.sp_model.decode(snake_case__ )
        return out_string.strip()
    def a ( self , snake_case__ , snake_case__ = None ):
        '''Save the sentencepiece model into `save_directory`; returns the written path tuple.'''
        if not os.path.isdir(snake_case__ ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        _lowerCAmelCase : List[Any] = os.path.join(
            snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , snake_case__ )
        elif not os.path.isfile(self.vocab_file ):
            # Fall back to serializing the in-memory sentencepiece model.
            with open(snake_case__ , 'wb' ) as fi:
                _lowerCAmelCase : Any = self.sp_model.serialized_model_proto()
                fi.write(snake_case__ )
        return (out_vocab_file,)
    def a ( self , snake_case__ , snake_case__ = None , snake_case__ = False ):
        '''Mask marking special tokens (1) vs sequence tokens (0).'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
        _lowerCAmelCase : Dict = [1] * len(self.prefix_tokens )
        _lowerCAmelCase : Optional[Any] = [1] * len(self.suffix_tokens )
        if token_ids_a is None:
            return prefix_ones + ([0] * len(snake_case__ )) + suffix_ones
        return prefix_ones + ([0] * len(snake_case__ )) + ([0] * len(snake_case__ )) + suffix_ones
    def a ( self , snake_case__ , snake_case__ = None ):
        '''Build model inputs: [src_lang_code] + tokens (+ pair tokens) + [eos].'''
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
    def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , **snake_case__ ):
        '''Prepare inputs for generation-style translation; forces the target language id.'''
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
        _lowerCAmelCase : str = src_lang
        _lowerCAmelCase : Optional[Any] = self(snake_case__ , add_special_tokens=snake_case__ , return_tensors=snake_case__ , **snake_case__ )
        _lowerCAmelCase : Any = self.convert_tokens_to_ids(snake_case__ )
        _lowerCAmelCase : Dict = tgt_lang_id
        return inputs
    def a ( self , snake_case__ , snake_case__ = "en_XX" , snake_case__ = None , snake_case__ = "ro_RO" , **snake_case__ , ):
        '''Batch-encode a seq2seq translation pair with the given language codes.'''
        _lowerCAmelCase : Union[str, Any] = src_lang
        _lowerCAmelCase : List[Any] = tgt_lang
        return super().prepare_seqaseq_batch(snake_case__ , snake_case__ , **snake_case__ )
    def a ( self ):
        '''Switch the special-token template to source-language mode.'''
        return self.set_src_lang_special_tokens(self.src_lang )
    def a ( self ):
        '''Switch the special-token template to target-language mode.'''
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def a ( self , snake_case__ ):
        '''Set prefix = [src_lang_code] and suffix = [eos].'''
        _lowerCAmelCase : Optional[Any] = self.lang_code_to_id[src_lang]
        _lowerCAmelCase : int = [self.cur_lang_code_id]
        _lowerCAmelCase : List[Any] = [self.eos_token_id]
    def a ( self , snake_case__ ):
        '''Set prefix = [tgt_lang_code] and suffix = [eos].'''
        _lowerCAmelCase : Any = self.lang_code_to_id[tgt_lang]
        _lowerCAmelCase : Any = [self.cur_lang_code_id]
        _lowerCAmelCase : Optional[int] = [self.eos_token_id]
| 706 |
'''simple docstring'''
import socket
def lowercase ():
    """Simple TCP file-receiving client.

    Connects to this host on port 12312, sends a greeting, and streams the
    server's response into the local file ``Received_file``.

    Fixes: the obfuscated original assigned the socket/host/port to a single
    throwaway local (`_lowerCAmelCase`) and then referenced the undefined names
    `sock`, `host`, `port`, `data` and `_A`, raising NameError at runtime.
    """
    sock = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
    host = socket.gethostname()
    port = 1_2_3_1_2
    sock.connect((host, port) )
    sock.send(B'Hello server!' )
    with open('Received_file' , 'wb' ) as out_file:
        print('File opened' )
        print('Receiving data...' )
        while True:
            # Read in 1024-byte chunks; an empty read means the server closed the connection.
            data = sock.recv(1_0_2_4 )
            if not data:
                break
            out_file.write(data )
    print('Successfully received the file' )
    sock.close()
    print('Connection closed' )
if __name__ == "__main__":
    # Fix: the original called undefined `main()`; the entry point in this
    # (obfuscated) file is named `lowercase`.
    lowercase()
| 630 | 0 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
# Resource filenames expected next to a checkpoint.
lowerCAmelCase : Dict = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all LED models at https://huggingface.co/models?filter=LED
lowerCAmelCase : List[Any] = {
    """vocab_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
    },
    """merges_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
    },
    """tokenizer_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
    },
}
# Maximum model input size (positional embeddings) per checkpoint.
lowerCAmelCase : Tuple = {
    """allenai/led-base-16384""": 1_63_84,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def lowercase ():
    """Return a dict mapping every byte value (0-255) to a printable unicode char.

    Printable bytes map to themselves; whitespace/control bytes (which byte-level
    BPE cannot handle) map to chr(256), chr(257), ... in order.

    Fixes: the obfuscated original appended the undefined name `lowercase_` and
    built the result from unbound locals, raising NameError on first call.
    """
    bs = (
        list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(c ) for c in cs]
    return dict(zip(bs , cs ) )
def lowercase (_A ):
    """Return the set of adjacent symbol pairs in the word *_A*.

    *_A* is a sequence of symbols (each symbol a variable-length string), as
    used by the BPE merge loop.

    Fixes: the obfuscated original referenced the undefined names `word`,
    `pairs` and `prev_char`, raising NameError on first call.
    """
    pairs = set()
    prev_char = _A[0]
    for char in _A[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class UpperCamelCase__ ( _UpperCamelCase ):
    """Byte-level BPE tokenizer (LED/BART style) with `global_attention_mask`
    padding support in `_pad`.
    NOTE(review): this file is machine-obfuscated — locals are collapsed to
    `_lowerCAmelCase` and arguments to `__a`/`snake_case__` — so several names
    read below (e.g. `bos_token`, `vocab`, `word`, `text`) are never actually
    bound. Verify against the upstream implementation before relying on behavior.
    """
    __magic_name__ = VOCAB_FILES_NAMES
    __magic_name__ = PRETRAINED_VOCAB_FILES_MAP
    __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __magic_name__ = ["input_ids", "attention_mask"]
    def __init__( self , snake_case__ , snake_case__ , snake_case__="replace" , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=False , **snake_case__ , ):
        '''Load vocab + merges files and set up byte-level BPE state.'''
        _lowerCAmelCase : Tuple = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else bos_token
        _lowerCAmelCase : List[str] = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else eos_token
        _lowerCAmelCase : Tuple = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else sep_token
        _lowerCAmelCase : str = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else cls_token
        _lowerCAmelCase : Any = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else unk_token
        _lowerCAmelCase : List[Any] = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        _lowerCAmelCase : int = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token
        super().__init__(
            errors=__a , bos_token=__a , eos_token=__a , unk_token=__a , sep_token=__a , cls_token=__a , pad_token=__a , mask_token=__a , add_prefix_space=__a , **__a , )
        with open(__a , encoding='utf-8' ) as vocab_handle:
            _lowerCAmelCase : int = json.load(__a )
        _lowerCAmelCase : str = {v: k for k, v in self.encoder.items()}
        _lowerCAmelCase : int = errors  # how to handle errors in decoding
        _lowerCAmelCase : str = bytes_to_unicode()
        _lowerCAmelCase : Optional[Any] = {v: k for k, v in self.byte_encoder.items()}
        with open(__a , encoding='utf-8' ) as merges_handle:
            _lowerCAmelCase : List[Any] = merges_handle.read().split('\n' )[1:-1]
        _lowerCAmelCase : Optional[int] = [tuple(merge.split() ) for merge in bpe_merges]
        _lowerCAmelCase : List[Any] = dict(zip(__a , range(len(__a ) ) ) )
        _lowerCAmelCase : Any = {}
        _lowerCAmelCase : List[Any] = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        _lowerCAmelCase : Optional[Any] = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def a ( self ):
        '''Size of the base vocabulary (without added tokens).'''
        return len(self.encoder )
    def a ( self ):
        '''Return the token->id vocabulary, including added tokens.'''
        return dict(self.encoder , **self.added_tokens_encoder )
    def a ( self , snake_case__ ):
        '''Apply BPE merges to a single pre-tokenized word; results are cached.'''
        if token in self.cache:
            return self.cache[token]
        _lowerCAmelCase : Optional[int] = tuple(__a )
        _lowerCAmelCase : List[Any] = get_pairs(__a )
        if not pairs:
            return token
        while True:
            # Merge the lowest-ranked pair first; stop when no known pair remains.
            _lowerCAmelCase : List[str] = min(__a , key=lambda snake_case__ : self.bpe_ranks.get(__a , float('inf' ) ) )
            if bigram not in self.bpe_ranks:
                break
            _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = bigram
            _lowerCAmelCase : Dict = []
            _lowerCAmelCase : Optional[Any] = 0
            while i < len(__a ):
                try:
                    _lowerCAmelCase : Dict = word.index(__a , __a )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    _lowerCAmelCase : str = j
                if word[i] == first and i < len(__a ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            _lowerCAmelCase : str = tuple(__a )
            _lowerCAmelCase : Optional[int] = new_word
            if len(__a ) == 1:
                break
            else:
                _lowerCAmelCase : Any = get_pairs(__a )
        _lowerCAmelCase : str = " ".join(__a )
        _lowerCAmelCase : Dict = word
        return word
    def a ( self , snake_case__ ):
        '''Regex pre-tokenize, byte-encode, then BPE-split each piece.'''
        _lowerCAmelCase : str = []
        for token in re.findall(self.pat , __a ):
            _lowerCAmelCase : List[str] = "".join(
                self.byte_encoder[b] for b in token.encode('utf-8' ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__a ).split(' ' ) )
        return bpe_tokens
    def a ( self , snake_case__ ):
        '''Token (str) -> id, falling back to the unknown token.'''
        return self.encoder.get(__a , self.encoder.get(self.unk_token ) )
    def a ( self , snake_case__ ):
        '''Id -> token (str).'''
        return self.decoder.get(__a )
    def a ( self , snake_case__ ):
        '''Join tokens and undo the byte-to-unicode mapping.'''
        _lowerCAmelCase : Tuple = "".join(__a )
        _lowerCAmelCase : List[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
        return text
    def a ( self , snake_case__ , snake_case__ = None ):
        '''Write vocab.json and merges.txt into `save_directory`.'''
        if not os.path.isdir(__a ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        _lowerCAmelCase : Tuple = os.path.join(
            __a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        _lowerCAmelCase : Dict = os.path.join(
            __a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
        with open(__a , 'w' , encoding='utf-8' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=__a , ensure_ascii=__a ) + '\n' )
        _lowerCAmelCase : str = 0
        with open(__a , 'w' , encoding='utf-8' ) as writer:
            writer.write('#version: 0.2\n' )
            # Merges must be written in rank order; warn if the ranks have gaps.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda snake_case__ : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ' Please check that the tokenizer is not corrupted!' )
                    _lowerCAmelCase : Union[str, Any] = token_index
                writer.write(' '.join(__a ) + '\n' )
                index += 1
        return vocab_file, merge_file
    def a ( self , snake_case__ , snake_case__ = None ):
        '''Build model inputs: <s> tokens </s> (</s> tokens_b </s> for pairs).'''
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        _lowerCAmelCase : Tuple = [self.cls_token_id]
        _lowerCAmelCase : List[Any] = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep
    def a ( self , snake_case__ , snake_case__ = None , snake_case__ = False ):
        '''Mask marking special tokens (1) vs sequence tokens (0).'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
        if token_ids_a is None:
            return [1] + ([0] * len(__a )) + [1]
        return [1] + ([0] * len(__a )) + [1, 1] + ([0] * len(__a )) + [1]
    def a ( self , snake_case__ , snake_case__ = None ):
        '''Token-type ids: all zeros (this model does not use token types).'''
        _lowerCAmelCase : Optional[int] = [self.sep_token_id]
        _lowerCAmelCase : List[Any] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    def a ( self , snake_case__ , snake_case__=False , **snake_case__ ):
        '''Optionally prepend a space so the first word is BPE-merged like mid-sentence words.'''
        _lowerCAmelCase : Optional[Any] = kwargs.pop('add_prefix_space' , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(__a ) > 0 and not text[0].isspace()):
            _lowerCAmelCase : List[Any] = " " + text
        return (text, kwargs)
    def a ( self , snake_case__ , snake_case__ = None , snake_case__ = PaddingStrategy.DO_NOT_PAD , snake_case__ = None , snake_case__ = None , ):
        '''Pad as usual, then pad `global_attention_mask` with -1 to the same length.'''
        _lowerCAmelCase : Any = super()._pad(
            encoded_inputs=__a , max_length=__a , padding_strategy=__a , pad_to_multiple_of=__a , return_attention_mask=__a , )
        # Load from model defaults
        if return_attention_mask is None:
            _lowerCAmelCase : str = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            _lowerCAmelCase : Dict = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            _lowerCAmelCase : str = len(encoded_inputs['global_attention_mask'] ) != len(__a )
            if needs_to_be_padded:
                _lowerCAmelCase : List[str] = len(__a ) - len(encoded_inputs['global_attention_mask'] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    _lowerCAmelCase : str = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    _lowerCAmelCase : List[Any] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
        return encoded_inputs
| 707 |
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
# Mode flags for this one-off UNet checkpoint conversion script
# (only one of config / renaming / weights should be True at a time).
# NOTE(review): the obfuscation rebinds every result to `lowerCAmelCase`, so later
# reads of `parser`, `args`, `config`, `model`, `state_dict`, etc. are unbound —
# restore the original variable names before running.
lowerCAmelCase : Tuple = False
lowerCAmelCase : str = True
lowerCAmelCase : List[Any] = False
if __name__ == "__main__":
    lowerCAmelCase : Any = argparse.ArgumentParser()
    parser.add_argument(
        """--repo_path""",
        default=None,
        type=str,
        required=True,
        help="""The config json file corresponding to the architecture.""",
    )
    parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
    lowerCAmelCase : Optional[int] = parser.parse_args()
    # Old-config key -> new-config key renames.
    lowerCAmelCase : int = {
        """image_size""": """sample_size""",
        """num_res_blocks""": """layers_per_block""",
        """block_channels""": """block_out_channels""",
        """down_blocks""": """down_block_types""",
        """up_blocks""": """up_block_types""",
        """downscale_freq_shift""": """freq_shift""",
        """resnet_num_groups""": """norm_num_groups""",
        """resnet_act_fn""": """act_fn""",
        """resnet_eps""": """norm_eps""",
        """num_head_channels""": """attention_head_dim""",
    }
    # Old state-dict key prefix -> new module-name renames.
    lowerCAmelCase : int = {
        """time_steps""": """time_proj""",
        """mid""": """mid_block""",
        """downsample_blocks""": """down_blocks""",
        """upsample_blocks""": """up_blocks""",
    }
    # Configs may live at the repo root or inside an "unet" subfolder.
    lowerCAmelCase : Optional[Any] = """""" if has_file(args.repo_path, """config.json""") else """unet"""
    with open(os.path.join(args.repo_path, subfolder, """config.json"""), """r""", encoding="""utf-8""") as reader:
        lowerCAmelCase : int = reader.read()
        lowerCAmelCase : List[str] = json.loads(text)
    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)
    if has_file(args.repo_path, """config.json"""):
        lowerCAmelCase : str = UNetaDModel(**config)
    else:
        lowerCAmelCase : Union[str, Any] = UNetaDConditionModel if """ldm-text2im-large-256""" in args.repo_path else UNetaDModel
        lowerCAmelCase : Dict = class_name(**config)
    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))
    lowerCAmelCase : Union[str, Any] = dict(model.config)
    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                lowerCAmelCase : str = config[key]
                del config[key]
        lowerCAmelCase : Optional[int] = [k.replace("""UNetRes""", """""") for k in config["""down_block_types"""]]
        lowerCAmelCase : Dict = [k.replace("""UNetRes""", """""") for k in config["""up_block_types"""]]
    if do_only_weights:
        lowerCAmelCase : Tuple = torch.load(os.path.join(args.repo_path, subfolder, """diffusion_pytorch_model.bin"""))
        lowerCAmelCase : str = {}
        for param_key, param_value in state_dict.items():
            # Skip deprecated downsample op parameters.
            if param_key.endswith(""".op.bias""") or param_key.endswith(""".op.weight"""):
                continue
            lowerCAmelCase : str = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(""".""")[0] == key:
                    lowerCAmelCase : Dict = param_value
                    lowerCAmelCase : Tuple = True
            if not has_changed:
                lowerCAmelCase : Tuple = param_value
        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 630 | 0 |
'''simple docstring'''
import baseaa
def lowercase (_A ) -> bytes:
    """Encode the string *_A* to bytes via ``baseaa.baaencode`` (UTF-8 first).

    Fixes: the obfuscated original called ``string.encode(...)`` where `string`
    is undefined — the pre-obfuscation parameter name; the parameter is `_A`.
    """
    return baseaa.baaencode(_A.encode('utf-8' ) )
def lowercase (_A ) -> str:
    """Decode the bytes *_A* via ``baseaa.baadecode`` and return a UTF-8 string.

    Fixes: the obfuscated original passed the undefined name `__UpperCamelCase`
    instead of the parameter `_A`, raising NameError on first call.
    """
    return baseaa.baadecode(_A ).decode('utf-8' )
if __name__ == "__main__":
    # Round-trip demo: encode then decode a sample string.
    # NOTE(review): `baseaa_encode` / `baseaa_decode`, `test`, `encoded` and
    # `decoded` are all undefined here — the obfuscation renamed both functions
    # above to `lowercase` and rebound every result to `lowerCAmelCase`.
    # Confirm the intended call targets against the original script.
    lowerCAmelCase : List[Any] = """Hello World!"""
    lowerCAmelCase : Tuple = baseaa_encode(test)
    print(encoded)
    lowerCAmelCase : Optional[int] = baseaa_decode(encoded)
    print(decoded)
| 708 |
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class UpperCamelCase__ ( tf.keras.layers.Layer ):
    """In-graph GPT-2 tokenizer Keras layer: BPE-tokenizes tf string tensors and
    optionally pads to `max_length` with `pad_token_id`.
    NOTE(review): obfuscated code — the `__init__` assignments below bind the
    local `_lowerCAmelCase` rather than attributes, so `self.pad_token_id`,
    `self.vocab`, etc. read later are never set here; verify against the
    original layer implementation.
    """
    def __init__( self , snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = None ):
        '''Store vocab/merges and build the keras-nlp BytePairTokenizer.'''
        super().__init__()
        _lowerCAmelCase : Union[str, Any] = pad_token_id
        _lowerCAmelCase : List[Any] = max_length
        _lowerCAmelCase : Tuple = vocab
        _lowerCAmelCase : str = merges
        _lowerCAmelCase : List[str] = BytePairTokenizer(snake_case__ , snake_case__ , sequence_length=snake_case__ )
    @classmethod
    def a ( cls , snake_case__ , *snake_case__ , **snake_case__ ):
        '''Build the layer from an existing GPT2Tokenizer (extract merges + vocab).'''
        _lowerCAmelCase : Dict = [' '.join(snake_case__ ) for m in tokenizer.bpe_ranks.keys()]
        _lowerCAmelCase : Any = tokenizer.get_vocab()
        return cls(snake_case__ , snake_case__ , *snake_case__ , **snake_case__ )
    @classmethod
    def a ( cls , snake_case__ , *snake_case__ , **snake_case__ ):
        '''Build the layer from a pretrained checkpoint name or path.'''
        _lowerCAmelCase : List[Any] = GPTaTokenizer.from_pretrained(snake_case__ , *snake_case__ , **snake_case__ )
        return cls.from_tokenizer(snake_case__ , *snake_case__ , **snake_case__ )
    @classmethod
    def a ( cls , snake_case__ ):
        '''Recreate the layer from a Keras config dict.'''
        return cls(**snake_case__ )
    def a ( self ):
        '''Keras serialization config.'''
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }
    def a ( self , snake_case__ , snake_case__ = None ):
        '''Tokenize a batch of strings; pad to max_length when a pad id is configured.'''
        _lowerCAmelCase : str = self.tf_tokenizer(snake_case__ )
        _lowerCAmelCase : str = tf.ones_like(snake_case__ )
        if self.pad_token_id is not None:
            # pad the tokens up to max length
            _lowerCAmelCase : Optional[int] = max_length if max_length is not None else self.max_length
            if max_length is not None:
                _lowerCAmelCase , _lowerCAmelCase : str = pad_model_inputs(
                    snake_case__ , max_seq_length=snake_case__ , pad_value=self.pad_token_id )
        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 630 | 0 |
'''simple docstring'''
from math import sqrt
def lowercase (_A ):
    """Trial-division primality test: return True iff *_A* is prime.

    Fixes: the obfuscated original bound every local to `_lowerCAmelCase`, then
    read the undefined names `number` and `status` and asserted
    ``isinstance(_A, _A)`` — all NameError/TypeError at runtime.
    """
    assert isinstance(_A , int ) and (
        _A >= 0
    ), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if _A <= 1:
        status = False
    for divisor in range(2 , int(round(sqrt(_A ) ) ) + 1 ):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if _A % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status , bool ), "'status' must been from type bool"
    return status
def lowercase (_A ):
    """Sieve of Eratosthenes: return the list of all primes from 2 up to *_A*.

    Fixes: the obfuscated original referenced the undefined names `n`,
    `begin_list` and `ans` (locals were collapsed to `_lowerCAmelCase`).
    """
    assert isinstance(_A , int ) and (_A > 2), "'N' must been an int and > 2"
    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2 , _A + 1 ) )
    ans = []  # this list will be returns.
    # actual sieve of erathostenes: zero out every multiple of a surviving entry
    for i in range(len(begin_list ) ):
        for j in range(i + 1 , len(begin_list ) ):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans , list ), "'ans' must been from type list"
    return ans
def lowercase (_A ):
    """Return all primes in [2, _A] by testing each candidate individually.

    Fixes: the obfuscated original appended the bound `_A` instead of the loop
    variable, and tested `is_prime(_A)` on every iteration.
    NOTE(review): `is_prime` is not defined under that name in this module (the
    obfuscation renamed every function to `lowercase`) — resolve before running.
    """
    assert isinstance(_A , int ) and (_A > 2), "'N' must been an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2 , _A + 1 ):
        if is_prime(number ):
            ans.append(number )
    # precondition
    assert isinstance(ans , list ), "'ans' must been from type list"
    return ans
def lowercase (_A ):
    """Intended: prime factorization of *_A* as a list of prime factors.
    NOTE(review): obfuscation collapsed the locals (`ans`, `quotient`, `factor`)
    into `_lowerCAmelCase`, and `is_prime` is not defined under that name in this
    module, so the body as written cannot run — restore from the original.
    """
    assert isinstance(_A , _A ) and number >= 0, "'number' must been an int and >= 0"
    _lowerCAmelCase : Union[str, Any] = []  # this list will be returns of the function.
    # potential prime number factors.
    _lowerCAmelCase : List[Any] = 2
    _lowerCAmelCase : List[str] = number
    if number == 0 or number == 1:
        ans.append(_A )
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(_A ):
        while quotient != 1:
            if is_prime(_A ) and (quotient % factor == 0):
                ans.append(_A )
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(_A )
    # precondition
    assert isinstance(_A , _A ), "'ans' must been from type list"
    return ans
def lowercase (_A ):
    """Intended: largest prime factor of *_A*.
    NOTE(review): relies on `prime_factorization` (not defined under that name in
    this module) and on the unbound local `ans` — restore from the original.
    """
    assert isinstance(_A , _A ) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    _lowerCAmelCase : Any = 0
    # prime factorization of 'number'
    _lowerCAmelCase : Optional[int] = prime_factorization(_A )
    _lowerCAmelCase : List[str] = max(_A )
    # precondition
    assert isinstance(_A , _A ), "'ans' must been from type int"
    return ans
def lowercase (_A ):
    """Intended: smallest prime factor of *_A*.
    NOTE(review): relies on `prime_factorization` (not defined under that name in
    this module) and on the unbound local `ans` — restore from the original.
    """
    assert isinstance(_A , _A ) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    _lowerCAmelCase : List[Any] = 0
    # prime factorization of 'number'
    _lowerCAmelCase : List[Any] = prime_factorization(_A )
    _lowerCAmelCase : int = min(_A )
    # precondition
    assert isinstance(_A , _A ), "'ans' must been from type int"
    return ans
def lowercase (_A ):
    """Return True iff *_A* is even.

    Fixes: the obfuscated original referenced the undefined name `number`
    instead of the parameter `_A`.
    """
    assert isinstance(_A , int ), "'number' must been an int"
    assert isinstance(_A % 2 == 0 , bool ), "compare bust been from type bool"
    return _A % 2 == 0
def lowercase (_A ):
    """Return True iff *_A* is odd.

    Fixes: the obfuscated original referenced the undefined name `number`
    instead of the parameter `_A`.
    """
    assert isinstance(_A , int ), "'number' must been an int"
    assert isinstance(_A % 2 != 0 , bool ), "compare bust been from type bool"
    return _A % 2 != 0
def lowercase (_A ):
    """Intended: Goldbach decomposition — find two primes summing to the even *_A* > 2.
    NOTE(review): depends on `is_prime`, `is_even` and `get_prime_numbers`, none
    of which are defined under those names in this module, and the mangled locals
    (`ans`, `prime_numbers`, `i`, `j`, `loop`, `len_pn`) are unbound — restore
    from the original before use.
    """
    assert (
        isinstance(_A , _A ) and (number > 2) and is_even(_A )
    ), "'number' must been an int, even and > 2"
    _lowerCAmelCase : str = []  # this list will returned
    # creates a list of prime numbers between 2 up to 'number'
    _lowerCAmelCase : int = get_prime_numbers(_A )
    _lowerCAmelCase : List[str] = len(_A )
    # run variable for while-loops.
    _lowerCAmelCase : Optional[Any] = 0
    _lowerCAmelCase : Tuple = None
    # exit variable. for break up the loops
    _lowerCAmelCase : Union[str, Any] = True
    while i < len_pn and loop:
        _lowerCAmelCase : Union[str, Any] = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                _lowerCAmelCase : List[Any] = False
                ans.append(prime_numbers[i] )
                ans.append(prime_numbers[j] )
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(_A , _A )
        and (len(_A ) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0] )
        and is_prime(ans[1] )
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans
def lowercase (_A , _B ):
    """Euclidean algorithm: return gcd(_A, _B) for non-negative integers.

    Fixes: the obfuscated original declared ``def lowercase(_A, _A)`` — a
    duplicate parameter name, which is a SyntaxError — and read the undefined
    names `numbera` and `rest`. `_B` restores a valid signature; callers use
    positional arguments, so this stays compatible.
    """
    assert (
        isinstance(_A , int )
        and isinstance(_B , int )
        and (_A >= 0)
        and (_B >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    while _B != 0:
        rest = _A % _B
        _A = _B
        _B = rest
    # precondition
    assert isinstance(_A , int ) and (
        _A >= 0
    ), "'number' must been from type int and positive"
    return _A
def lowercase (_A , _A ):
    """Intended: least common multiple (kgV) of two positive integers.
    NOTE(review): the duplicated parameter name `_A` is a SyntaxError introduced
    by the obfuscation (originally two distinct numbers); `prime_factorization`
    is not defined under that name here, and the mangled locals (`ans`,
    `prime_fac_1/2`, `done`, counts) are unbound — restore from the original.
    """
    assert (
        isinstance(_A , _A )
        and isinstance(_A , _A )
        and (numbera >= 1)
        and (numbera >= 1)
    ), "'number1' and 'number2' must been positive integer."
    _lowerCAmelCase : List[Any] = 1  # actual answer that will be return.
    # for kgV (x,1)
    if numbera > 1 and numbera > 1:
        # builds the prime factorization of 'number1' and 'number2'
        _lowerCAmelCase : Optional[int] = prime_factorization(_A )
        _lowerCAmelCase : int = prime_factorization(_A )
    elif numbera == 1 or numbera == 1:
        _lowerCAmelCase : Any = []
        _lowerCAmelCase : List[str] = []
    _lowerCAmelCase : int = max(_A , _A )
    _lowerCAmelCase : int = 0
    _lowerCAmelCase : int = 0
    _lowerCAmelCase : List[str] = []  # captured numbers int both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1
    for n in prime_fac_a:
        if n not in done:
            if n in prime_fac_a:
                _lowerCAmelCase : Tuple = prime_fac_a.count(_A )
                _lowerCAmelCase : Optional[Any] = prime_fac_a.count(_A )
                for _ in range(max(_A , _A ) ):
                    ans *= n
            else:
                _lowerCAmelCase : Optional[Any] = prime_fac_a.count(_A )
                for _ in range(_A ):
                    ans *= n
            done.append(_A )
    # iterates through primeFac2
    for n in prime_fac_a:
        if n not in done:
            _lowerCAmelCase : Any = prime_fac_a.count(_A )
            for _ in range(_A ):
                ans *= n
            done.append(_A )
    # precondition
    assert isinstance(_A , _A ) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans
def lowercase (_A ):
    """Intended: return the n-th prime number (counting 2 as the 0th).
    NOTE(review): depends on `is_prime` (not defined under that name here) and on
    the mangled locals `index` and `ans` — restore from the original before use.
    """
    assert isinstance(_A , _A ) and (n >= 0), "'number' must been a positive int"
    _lowerCAmelCase : Optional[int] = 0
    _lowerCAmelCase : List[str] = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(_A ):
            ans += 1
    # precondition
    assert isinstance(_A , _A ) and is_prime(
        _A ), "'ans' must been a prime number and from type int"
    return ans
def lowercase(p_number_1, p_number_2):
    """Return the list of all primes strictly between two primes.

    Requires ``p_number_1 < p_number_2`` and both prime; neither endpoint
    is included in the result. Fixed: the original signature declared the
    same parameter name twice (a SyntaxError) and the body referenced an
    undefined name.
    """
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returns.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def lowercase(n):
    """Return every positive divisor of n (including 1 and n), ascending.

    Fixed: the original parameter was named `_A` while the body read the
    undefined name `n`.
    """
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans
def lowercase(number):
    """Return True iff `number` is perfect, i.e. equals the sum of its
    proper divisors (all divisors except the number itself).

    Fixed: the original parameter was named `_A` while the body read the
    undefined name `number`.
    """
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def lowercase(numerator, denominator):
    """Return the fraction numerator/denominator reduced to lowest terms,
    as a (numerator, denominator) tuple.

    Fixed: the original signature declared the same parameter name twice
    (a SyntaxError) and the body referenced undefined names.
    """
    # stdlib gcd keeps this block self-contained (same result as a
    # hand-rolled positive-integer gcd helper)
    from math import gcd

    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def lowercase(n):
    """Return n! iteratively, with 0! == 1.

    Fixed: the original parameter was named `_A` while the body read the
    undefined name `n`.
    """
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1  # this will be return.
    for factor in range(1, n + 1):
        ans *= factor
    return ans
def lowercase(n):
    """Return the n-th Fibonacci number with fib(0) == fib(1) == 1.

    Iterative two-variable scheme: n - 1 additions. Fixed: the original
    parameter was named `_A` while the body read the undefined name `n`,
    and the locals were garbled.
    """
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"
    tmp = 0
    fib_prev = 1  # fib(k-1)
    ans = 1  # this will be return
    for _ in range(n - 1):
        tmp = ans
        ans += fib_prev
        fib_prev = tmp
    return ans
| 709 |
"""Lazy import bootstrap for the (deprecated) M-CTC-T model package."""
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Maps submodule name -> public names it provides; consumed by _LazyModule.
# Fixed: this dict must be named `_import_structure` (it was assigned to a
# throwaway name while still being referenced below), the torch-only model
# list must be attached to it, and the lazy module must be installed into
# sys.modules.
_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling code is only exposed when torch is installed.
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 630 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
# Module-level logger for this tokenizer module.
lowerCAmelCase : str = logging.get_logger(__name__)

# File names of the serialized tokenizer resources.
lowerCAmelCase : Optional[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}

# See all BART models at https://huggingface.co/models?filter=bart
# Download URLs for the vocab/merges/tokenizer files of each published checkpoint.
lowerCAmelCase : Dict = {
    """vocab_file""": {
        """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
        """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
        """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
        """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
        """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
        """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
    },
    """merges_file""": {
        """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
        """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
        """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
        """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
        """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
        """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
    },
    """tokenizer_file""": {
        """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json""",
        """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json""",
        """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json""",
        """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json""",
        """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json""",
        """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json""",
    },
}
# Maximum input lengths (positional-embedding sizes) per checkpoint.
lowerCAmelCase : Dict = {
    """facebook/bart-base""": 10_24,
    """facebook/bart-large""": 10_24,
    """facebook/bart-large-mnli""": 10_24,
    """facebook/bart-large-cnn""": 10_24,
    """facebook/bart-large-xsum""": 10_24,
    """yjernite/bart_eli5""": 10_24,
}
class UpperCamelCase__ ( UpperCamelCase__ ):
    """Fast (tokenizers-backed) BART tokenizer.

    NOTE(review): this code is obfuscated — `__init__` repeats the parameter
    name `snake_case__`, the bodies reference the undefined name `_a`, and the
    mask-token property is defined under the name `a` while its setter is
    registered via `@mask_token.setter`. It cannot run as written; verify
    against the upstream BartTokenizerFast implementation.
    """

    __magic_name__ = VOCAB_FILES_NAMES
    __magic_name__ = PRETRAINED_VOCAB_FILES_MAP
    __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __magic_name__ = ["input_ids", "attention_mask"]
    __magic_name__ = BartTokenizer

    def __init__( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__="replace" , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=False , snake_case__=True , **snake_case__ , ):
        '''Build the fast tokenizer and sync pre-tokenizer/post-processor state.'''
        super().__init__(
            _a , _a , tokenizer_file=_a , errors=_a , bos_token=_a , eos_token=_a , sep_token=_a , cls_token=_a , unk_token=_a , pad_token=_a , mask_token=_a , add_prefix_space=_a , trim_offsets=_a , **_a , )
        _lowerCAmelCase : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space' , _a ) != add_prefix_space:
            # rebuild the pre-tokenizer when the requested prefix-space flag differs
            _lowerCAmelCase : Tuple = getattr(_a , pre_tok_state.pop('type' ) )
            _lowerCAmelCase : Union[str, Any] = add_prefix_space
            _lowerCAmelCase : List[str] = pre_tok_class(**_a )
        _lowerCAmelCase : List[Any] = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        _lowerCAmelCase : int = """post_processor"""
        _lowerCAmelCase : Optional[Any] = getattr(self.backend_tokenizer , _a , _a )
        if tokenizer_component_instance:
            _lowerCAmelCase : Dict = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                _lowerCAmelCase : str = tuple(state['sep'] )
            if "cls" in state:
                _lowerCAmelCase : List[str] = tuple(state['cls'] )
            _lowerCAmelCase : str = False
            if state.get('add_prefix_space' , _a ) != add_prefix_space:
                _lowerCAmelCase : Any = add_prefix_space
                _lowerCAmelCase : Dict = True
            if state.get('trim_offsets' , _a ) != trim_offsets:
                _lowerCAmelCase : List[Any] = trim_offsets
                _lowerCAmelCase : Union[str, Any] = True
            if changes_to_apply:
                # re-instantiate the post-processor with the updated settings
                _lowerCAmelCase : List[str] = getattr(_a , state.pop('type' ) )
                _lowerCAmelCase : int = component_class(**_a )
                setattr(self.backend_tokenizer , _a , _a )

    @property
    def a ( self ):
        '''Return the mask token string, or None (with an error log) if unset.'''
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.' )
            return None
        return str(self._mask_token )

    @mask_token.setter
    def a ( self , snake_case__ ):
        '''Set the mask token, wrapping plain strings in a lstrip-aware AddedToken.'''
        _lowerCAmelCase : Union[str, Any] = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else value
        _lowerCAmelCase : Any = value

    def a ( self , *snake_case__ , **snake_case__ ):
        '''Batch-encode; pretokenized input requires add_prefix_space=True.'''
        _lowerCAmelCase : Optional[int] = kwargs.get('is_split_into_words' , _a )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                'to use it with pretokenized inputs.' )
        return super()._batch_encode_plus(*_a , **_a )

    def a ( self , *snake_case__ , **snake_case__ ):
        '''Encode one example; pretokenized input requires add_prefix_space=True.'''
        _lowerCAmelCase : str = kwargs.get('is_split_into_words' , _a )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                'to use it with pretokenized inputs.' )
        return super()._encode_plus(*_a , **_a )

    def a ( self , snake_case__ , snake_case__ = None ):
        '''Save the underlying tokenizer model files; returns the file names.'''
        _lowerCAmelCase : Union[str, Any] = self._tokenizer.model.save(_a , name=_a )
        return tuple(_a )

    def a ( self , snake_case__ , snake_case__=None ):
        '''Build model inputs: <s> A </s> (plus </s> B </s> for pairs).'''
        _lowerCAmelCase : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]

    def a ( self , snake_case__ , snake_case__ = None ):
        '''Return an all-zero token-type-id mask (BART does not use token types).'''
        _lowerCAmelCase : List[Any] = [self.sep_token_id]
        _lowerCAmelCase : Tuple = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 710 |
"""Project Euler 92 helper: square-digit-sum step function."""
# DIGITS_SQUARED[i] = sum of the squares of the decimal digits of i, i < 100000.
# Fixed: the table was assigned to a throwaway name while the function below
# reads DIGITS_SQUARED.
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)]


def lowercase(number):
    """Return the sum of the squares of the decimal digits of `number`."""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 1_0_0_0_0_0]
        number //= 1_0_0_0_0_0
    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
lowerCAmelCase : list[bool | None] = [None] * 10_00_00_00
lowerCAmelCase : List[str] = True
lowerCAmelCase : Union[str, Any] = False
def lowercase(number):
    """Return True if the square-digit chain starting at `number` reaches 1,
    False if it reaches 89; memoizes the result in CHAINS for every chain
    member below ten million.

    Fixed: the original parameter was named `_A` while the body read the
    undefined name `number`, and the memo writes into CHAINS were garbled.
    """
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain
    # Appending zeros does not change the digit-square sum, so every
    # power-of-ten multiple shares the same chain tail — seed those too.
    while number < 1_0_0_0_0_0_0_0:
        CHAINS[number - 1] = number_chain
        number *= 1_0
    return number_chain
def lowercase(number=1_0_0_0_0_0_0_0):
    """Project Euler 92: count how many starting numbers below `number`
    produce a square-digit chain that arrives at 89.

    Fixed: the garbled body iterated/counted over an undefined name; chains
    flagged False in CHAINS are the ones ending at 89.
    """
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    return CHAINS[:number].count(False)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(F'''{solution() = }''')
| 630 | 0 |
"""Compute the Hamming distance between two equal-length strings."""


def lowercase(string_a, string_b):
    """Return the number of positions at which the corresponding characters
    of the two strings differ.

    Raises:
        ValueError: if the strings have different lengths.

    Fixed: the original declared the same parameter name twice (a
    SyntaxError) and its loop bound both zip targets to the same name,
    making the comparison `chara != chara` always False (count was
    always 0).
    """
    if len(string_a) != len(string_b):
        raise ValueError('String lengths must match!')
    count = 0
    for char_a, char_b in zip(string_a, string_b):
        if char_a != char_b:
            count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 711 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """Config tester that additionally checks MobileViTV2-specific fields."""

    def a ( self ):
        '''Instantiate the config and verify it exposes `width_multiplier`.'''
        _lowerCAmelCase : int = self.config_class(**self.inputs_dict )
        # Fixed: the attribute check must run against the freshly-built config
        # object; the original referenced the undefined name `snake_case__`.
        self.parent.assertTrue(hasattr(_lowerCAmelCase , 'width_multiplier' ) )
class UpperCamelCase__ :
    """Builds tiny MobileViTV2 configs and dummy inputs for the unit tests.

    NOTE(review): the constructor signature is obfuscated — it repeats the
    parameter name `snake_case__` while the body reads `parent`,
    `batch_size`, `width_multiplier`, etc. This cannot run as written;
    compare with the upstream MobileViTV2ModelTester before relying on it.
    """

    def __init__( self , snake_case__ , snake_case__=13 , snake_case__=64 , snake_case__=2 , snake_case__=3 , snake_case__="swish" , snake_case__=3 , snake_case__=32 , snake_case__=0.1 , snake_case__=0.02 , snake_case__=True , snake_case__=True , snake_case__=10 , snake_case__=None , snake_case__=0.25 , snake_case__=0.0 , snake_case__=0.0 , ):
        '''Store the test hyper-parameters on the instance.'''
        _lowerCAmelCase : Tuple = parent
        _lowerCAmelCase : Optional[int] = batch_size
        _lowerCAmelCase : List[Any] = image_size
        _lowerCAmelCase : List[Any] = patch_size
        _lowerCAmelCase : List[str] = num_channels
        # hidden size derived from the width multiplier, rounded to a multiple of 8
        _lowerCAmelCase : Optional[Any] = make_divisible(512 * width_multiplier , divisor=8 )
        _lowerCAmelCase : Optional[Any] = hidden_act
        _lowerCAmelCase : List[Any] = conv_kernel_size
        _lowerCAmelCase : Optional[Any] = output_stride
        _lowerCAmelCase : List[Any] = classifier_dropout_prob
        _lowerCAmelCase : str = use_labels
        _lowerCAmelCase : List[str] = is_training
        _lowerCAmelCase : Optional[int] = num_labels
        _lowerCAmelCase : List[str] = initializer_range
        _lowerCAmelCase : str = scope
        _lowerCAmelCase : Any = width_multiplier
        _lowerCAmelCase : Union[str, Any] = ffn_dropout
        _lowerCAmelCase : Optional[int] = attn_dropout

    def a ( self ):
        '''Create random pixel values, optional labels, and a config.'''
        _lowerCAmelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        _lowerCAmelCase : Optional[Any] = None
        _lowerCAmelCase : Dict = None
        if self.use_labels:
            _lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
            _lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        _lowerCAmelCase : Optional[int] = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def a ( self ):
        '''Build a MobileViTV2 config from the stored hyper-parameters.'''
        return MobileViTVaConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )

    def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
        '''Run the base model and check the last-hidden-state shape.'''
        _lowerCAmelCase : Any = MobileViTVaModel(config=snake_case__ )
        model.to(snake_case__ )
        model.eval()
        _lowerCAmelCase : str = model(snake_case__ )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )

    def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
        '''Run the classification head and check the logits shape.'''
        _lowerCAmelCase : Optional[int] = self.num_labels
        _lowerCAmelCase : List[Any] = MobileViTVaForImageClassification(snake_case__ )
        model.to(snake_case__ )
        model.eval()
        _lowerCAmelCase : int = model(snake_case__ , labels=snake_case__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
        '''Run the segmentation head, with and without labels, and check shapes.'''
        _lowerCAmelCase : List[Any] = self.num_labels
        _lowerCAmelCase : Optional[int] = MobileViTVaForSemanticSegmentation(snake_case__ )
        model.to(snake_case__ )
        model.eval()
        _lowerCAmelCase : Dict = model(snake_case__ )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
        _lowerCAmelCase : Any = model(snake_case__ , labels=snake_case__ )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )

    def a ( self ):
        '''Split prepared inputs into (config, inputs_dict) for the common tests.'''
        _lowerCAmelCase : int = self.prepare_config_and_inputs()
        _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = config_and_inputs
        _lowerCAmelCase : Tuple = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
    """Common-suite tests for the MobileViTV2 model classes."""

    # All torch model classes under test (empty when torch is unavailable).
    __magic_name__ = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    # Pipeline-task -> model-class mapping used by the pipeline mixin.
    __magic_name__ = (
        {
            "feature-extraction": MobileViTVaModel,
            "image-classification": MobileViTVaForImageClassification,
            "image-segmentation": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    __magic_name__ = False
    __magic_name__ = False
    __magic_name__ = False
    __magic_name__ = False

    def a ( self ):
        '''Set up the model tester and config tester.'''
        _lowerCAmelCase : int = MobileViTVaModelTester(self )
        _lowerCAmelCase : Dict = MobileViTVaConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ )

    def a ( self ):
        '''Run the shared config checks.'''
        self.config_tester.run_common_tests()

    @unittest.skip(reason='MobileViTV2 does not use inputs_embeds' )
    def a ( self ):
        '''Skipped: model has no inputs_embeds.'''
        pass

    @unittest.skip(reason='MobileViTV2 does not support input and output embeddings' )
    def a ( self ):
        '''Skipped: model has no I/O embeddings.'''
        pass

    @unittest.skip(reason='MobileViTV2 does not output attentions' )
    def a ( self ):
        '''Skipped: model does not return attentions.'''
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.' )
    def a ( self ):
        '''Skipped on multi-GPU due to a CUDA misaligned-address error.'''
        pass

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def a ( self ):
        '''Skipped until the common-test model is shrunk.'''
        pass

    def a ( self ):
        '''Check that forward() only takes `pixel_values` as positional input.'''
        _lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _lowerCAmelCase : str = model_class(snake_case__ )
            _lowerCAmelCase : List[str] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _lowerCAmelCase : int = [*signature.parameters.keys()]
            _lowerCAmelCase : Any = ['pixel_values']
            self.assertListEqual(arg_names[:1] , snake_case__ )

    def a ( self ):
        '''Exercise the base model via the model tester.'''
        _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*snake_case__ )

    def a ( self ):
        '''Check number and spatial shapes of the hidden-state feature maps.'''
        def check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ):
            _lowerCAmelCase : Dict = model_class(snake_case__ )
            model.to(snake_case__ )
            model.eval()
            with torch.no_grad():
                _lowerCAmelCase : Optional[Any] = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
            _lowerCAmelCase : List[str] = outputs.hidden_states
            _lowerCAmelCase : List[str] = 5
            self.assertEqual(len(snake_case__ ) , snake_case__ )
            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            _lowerCAmelCase : List[Any] = 2
            for i in range(len(snake_case__ ) ):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride , divisor // 2 )

        _lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _lowerCAmelCase : Optional[int] = True
            check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            _lowerCAmelCase : Any = True
            check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )

    def a ( self ):
        '''Exercise the image-classification head via the model tester.'''
        _lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*snake_case__ )

    def a ( self ):
        '''Exercise the semantic-segmentation head via the model tester.'''
        _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*snake_case__ )

    @slow
    def a ( self ):
        '''Load the published checkpoint and verify it instantiates.'''
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowerCAmelCase : Dict = MobileViTVaModel.from_pretrained(snake_case__ )
            self.assertIsNotNone(snake_case__ )
def lowercase ():
    """Load the standard COCO cats fixture image used by the integration tests."""
    return Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
@require_torch
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
    """Slow integration tests against published MobileViTV2 checkpoints."""

    @cached_property
    def a ( self ):
        '''Return the default image processor (None without vision deps).'''
        return (
            MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' )
            if is_vision_available()
            else None
        )

    @slow
    def a ( self ):
        '''Classification: check logits shape and a reference slice.'''
        _lowerCAmelCase : Union[str, Any] = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ).to(
            snake_case__ )
        _lowerCAmelCase : str = self.default_image_processor
        _lowerCAmelCase : Any = prepare_img()
        _lowerCAmelCase : Optional[int] = image_processor(images=snake_case__ , return_tensors='pt' ).to(snake_case__ )
        # forward pass
        with torch.no_grad():
            _lowerCAmelCase : Tuple = model(**snake_case__ )
        # verify the logits
        _lowerCAmelCase : Optional[Any] = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , snake_case__ )
        _lowerCAmelCase : Tuple = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] ).to(snake_case__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case__ , atol=1E-4 ) )

    @slow
    def a ( self ):
        '''Segmentation: check logits shape and a reference 3x3x3 slice.'''
        _lowerCAmelCase : List[str] = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
        _lowerCAmelCase : Any = model.to(snake_case__ )
        _lowerCAmelCase : int = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
        _lowerCAmelCase : Optional[int] = prepare_img()
        _lowerCAmelCase : Dict = image_processor(images=snake_case__ , return_tensors='pt' ).to(snake_case__ )
        # forward pass
        with torch.no_grad():
            _lowerCAmelCase : int = model(**snake_case__ )
        _lowerCAmelCase : Dict = outputs.logits
        # verify the logits
        _lowerCAmelCase : str = torch.Size((1, 21, 32, 32) )
        self.assertEqual(logits.shape , snake_case__ )
        _lowerCAmelCase : Any = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ] , device=snake_case__ , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , snake_case__ , atol=1E-4 ) )

    @slow
    def a ( self ):
        '''Post-processing: check resized and native segmentation map shapes.'''
        _lowerCAmelCase : Any = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
        _lowerCAmelCase : List[Any] = model.to(snake_case__ )
        _lowerCAmelCase : str = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
        _lowerCAmelCase : Tuple = prepare_img()
        _lowerCAmelCase : List[str] = image_processor(images=snake_case__ , return_tensors='pt' ).to(snake_case__ )
        # forward pass
        with torch.no_grad():
            _lowerCAmelCase : Any = model(**snake_case__ )
        _lowerCAmelCase : Optional[Any] = outputs.logits.detach().cpu()
        _lowerCAmelCase : Any = image_processor.post_process_semantic_segmentation(outputs=snake_case__ , target_sizes=[(50, 60)] )
        _lowerCAmelCase : List[Any] = torch.Size((50, 60) )
        self.assertEqual(segmentation[0].shape , snake_case__ )
        _lowerCAmelCase : List[str] = image_processor.post_process_semantic_segmentation(outputs=snake_case__ )
        _lowerCAmelCase : Tuple = torch.Size((32, 32) )
        self.assertEqual(segmentation[0].shape , snake_case__ )
| 630 | 0 |
'''simple docstring'''
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class UpperCamelCase__ ( unittest.TestCase ):
    """Builds small BigBird configs and dummy inputs for the Flax tests.

    NOTE(review): the constructor signature is obfuscated — it repeats the
    parameter name `snake_case__` while the body reads `parent`,
    `batch_size`, `attention_type`, etc. This cannot run as written; compare
    with the upstream FlaxBigBirdModelTester before relying on it.
    """

    def __init__( self , snake_case__ , snake_case__=2 , snake_case__=56 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=99 , snake_case__=32 , snake_case__=2 , snake_case__=2 , snake_case__=7 , snake_case__="gelu_new" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=16 , snake_case__=2 , snake_case__=0.02 , snake_case__=4 , snake_case__="block_sparse" , snake_case__=True , snake_case__=False , snake_case__=2 , snake_case__=3 , ):
        '''Store the test hyper-parameters on the instance.'''
        _lowerCAmelCase : List[str] = parent
        _lowerCAmelCase : Optional[Any] = batch_size
        _lowerCAmelCase : Tuple = seq_length
        _lowerCAmelCase : Any = is_training
        _lowerCAmelCase : List[str] = use_attention_mask
        _lowerCAmelCase : Any = use_token_type_ids
        _lowerCAmelCase : List[str] = use_labels
        _lowerCAmelCase : Dict = vocab_size
        _lowerCAmelCase : Optional[Any] = hidden_size
        _lowerCAmelCase : List[Any] = num_hidden_layers
        _lowerCAmelCase : Optional[int] = num_attention_heads
        _lowerCAmelCase : Union[str, Any] = intermediate_size
        _lowerCAmelCase : Optional[Any] = hidden_act
        _lowerCAmelCase : List[str] = hidden_dropout_prob
        _lowerCAmelCase : Optional[int] = attention_probs_dropout_prob
        _lowerCAmelCase : Optional[int] = max_position_embeddings
        _lowerCAmelCase : Optional[int] = type_vocab_size
        _lowerCAmelCase : Optional[int] = type_sequence_label_size
        _lowerCAmelCase : str = initializer_range
        _lowerCAmelCase : str = num_choices
        _lowerCAmelCase : Optional[int] = rescale_embeddings
        _lowerCAmelCase : List[str] = attention_type
        _lowerCAmelCase : int = use_bias
        _lowerCAmelCase : Optional[int] = block_size
        _lowerCAmelCase : Optional[Any] = num_random_blocks

    def a ( self ):
        '''Create random ids/masks and a BigBird config.'''
        _lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _lowerCAmelCase : Any = None
        if self.use_attention_mask:
            _lowerCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
        _lowerCAmelCase : List[str] = None
        if self.use_token_type_ids:
            _lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        _lowerCAmelCase : int = BigBirdConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
        return config, input_ids, token_type_ids, attention_mask

    def a ( self ):
        '''Split prepared inputs into (config, inputs_dict) for the common tests.'''
        _lowerCAmelCase : List[str] = self.prepare_config_and_inputs()
        _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = config_and_inputs
        _lowerCAmelCase : str = {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'attention_mask': attention_mask,
        }
        return config, inputs_dict
@require_flax
class UpperCamelCase__ ( lowerCAmelCase__ , unittest.TestCase ):
    """Common-suite tests for the Flax BigBird model classes."""

    # All Flax model classes under test (empty when flax is unavailable).
    __magic_name__ = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )
    __magic_name__ = False
    __magic_name__ = False

    def a ( self ):
        '''Set up the model tester.'''
        _lowerCAmelCase : int = FlaxBigBirdModelTester(self )

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def a ( self ):
        '''Marked slow: save/load round-trip takes long for BigBird.'''
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def a ( self ):
        '''Marked slow: from_pretrained without automatic init takes long.'''
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def a ( self ):
        '''Marked slow: no-automatic-init path takes long.'''
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def a ( self ):
        '''Marked slow: hidden-states check takes long.'''
        super().test_hidden_states_output()

    @slow
    def a ( self ):
        '''Load the published checkpoint for each class and verify it instantiates.'''
        for model_class_name in self.all_model_classes:
            _lowerCAmelCase : Tuple = model_class_name.from_pretrained('google/bigbird-roberta-base' )
            self.assertIsNotNone(_lowerCamelCase )

    def a ( self ):
        '''Only run attention-output checks when attention probs are testable.'''
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def a ( self ):
        '''Check jitted and non-jitted forward passes produce matching shapes.'''
        _lowerCAmelCase , _lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                _lowerCAmelCase : Optional[Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
                _lowerCAmelCase : int = model_class(_lowerCamelCase )

                @jax.jit
                def model_jitted(snake_case__ , snake_case__=None , **snake_case__ ):
                    return model(input_ids=_lowerCamelCase , attention_mask=_lowerCamelCase , **_lowerCamelCase )

                with self.subTest('JIT Enabled' ):
                    _lowerCAmelCase : List[Any] = model_jitted(**_lowerCamelCase ).to_tuple()
                with self.subTest('JIT Disabled' ):
                    with jax.disable_jit():
                        _lowerCAmelCase : int = model_jitted(**_lowerCamelCase ).to_tuple()
                self.assertEqual(len(_lowerCamelCase ) , len(_lowerCamelCase ) )
                for jitted_output, output in zip(_lowerCamelCase , _lowerCamelCase ):
                    self.assertEqual(jitted_output.shape , output.shape )

    def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__=1E-5 , snake_case__="outputs" , snake_case__=None ):
        '''PT/Flax equivalence check, skipping attention outputs by name.'''
        if name.startswith('outputs.attentions' ):
            return
        else:
            super().check_pt_flax_outputs(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
| 712 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class UpperCamelCase__(unittest.TestCase):
    """Integration test for the Flax XLM-RoBERTa base checkpoint."""

    @slow
    def test_flax_xlm_roberta_base(self):
        # Renamed from `a` so unittest discovers it; the original body also referenced the
        # undefined `snake_case__` instead of the values it had just computed.
        model = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base')
        tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-base')
        text = 'The dog is cute and lives in the garden house'
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
        output = model(input_ids)['last_hidden_state']
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1E-3))
| 630 | 0 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:
    class UpperCamelCase__:
        """Stand-in for ``PIL.Image`` so this module imports without vision dependencies."""

        @staticmethod
        def a(*args, **kwargs):
            # No-op replacement for Image.open. The original declared
            # `*snake_case__, **snake_case__` -- a duplicate-argument SyntaxError.
            pass
@is_pipeline_test
@require_torch
@require_vision
class UpperCamelCase__(unittest.TestCase):
    """Pipeline tests for the visual-question-answering task.

    Fixes: duplicate ``snake_case__`` parameters (SyntaxError), undefined ``image``/``question``
    references, and five methods all named ``a`` that shadowed one another -- restored to the
    names the pipeline-test harness and unittest discovery expect.
    """

    # NOTE(review): presumably this should be the ``model_mapping`` attribute read by the
    # pipeline-test mixin -- confirm against the mixin before renaming.
    __magic_name__ = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        """Build a tiny VQA pipeline plus example inputs for the common harness."""
        vqa_pipeline = pipeline('visual-question-answering', model='hf-internal-testing/tiny-vilt-random-vqa')
        examples = [
            {
                "image": Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png'),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        """Each example must yield exactly one {score, answer} dict at top_k=1."""
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs, [
                [{'score': ANY(float), 'answer': ANY(str)}],
                [{'score': ANY(float), 'answer': ANY(str)}],
            ], )

    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline('visual-question-answering', model='hf-internal-testing/tiny-vilt-random-vqa')
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"
        outputs = vqa_pipeline(image=image, question='How many cats are there?', top_k=2)
        self.assertEqual(
            outputs, [{'score': ANY(float), 'answer': ANY(str)}, {'score': ANY(float), 'answer': ANY(str)}] )
        outputs = vqa_pipeline({'image': image, 'question': question}, top_k=2)
        self.assertEqual(
            outputs, [{'score': ANY(float), 'answer': ANY(str)}, {'score': ANY(float), 'answer': ANY(str)}] )

    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline('visual-question-answering', model='dandelin/vilt-b32-finetuned-vqa')
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"
        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}] )
        outputs = vqa_pipeline({'image': image, 'question': question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}] )
        outputs = vqa_pipeline(
            [{'image': image, 'question': question}, {'image': image, 'question': question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [[{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}]] * 2, )

    @require_tf
    @unittest.skip('Visual question answering not implemented in TF')
    def test_small_model_tf(self):
        pass
| 713 |
'''simple docstring'''
def lowercase(_A):
    """Pancake sort: repeatedly flip the current maximum to the front, then into place.

    Returns the list sorted ascending. O(n^2) comparisons, O(n) flips.
    Fixes the original, which referenced the undefined names ``cur``/``mi``/``arr``.
    """
    arr = _A
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in the unsorted prefix arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi so the maximum moves to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the whole unsorted prefix so the maximum lands at position cur-1
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
if __name__ == "__main__":
    # Read a comma-separated list of integers and print them pancake-sorted.
    # Fixes the original, which called the nonexistent `pancake_sort` with an undefined variable.
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(lowercase(unsorted))
| 630 | 0 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)

# Submodule name -> public names, consumed by _LazyModule below.
# Fixes the original, where every assignment bound the same throwaway name so
# `_import_structure` was undefined and the lazy module was never installed.
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]

if TYPE_CHECKING:
    from .processing_layoutxlm import LayoutXLMProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps are imported on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 714 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase : str = {
"""EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class UpperCamelCase__(PretrainedConfig):
    """Configuration class for GPT-J models.

    Fixes the original block: the base class name was undefined, both class attributes were
    assigned to the same mangled name (so ``model_type``/``attribute_map`` were lost), the
    ``__init__`` signature repeated one parameter name (SyntaxError), and every ``self.*``
    attribute assignment had been collapsed onto a throwaway local.
    """

    model_type = "gptj"
    # Canonical config names mapped onto the GPT-2 style attribute names used here.
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=5_0400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1E-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=5_0256,
        eos_token_id=5_0256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        """Store all hyper-parameters, then delegate shared token ids to the base class."""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs )
class UpperCamelCase__(OnnxConfigWithPast):
    """ONNX export configuration for GPT-J with optional past-key-values support.

    Fixes the original block: duplicate ``snake_case__`` parameters (SyntaxError), attribute
    and dict assignments collapsed onto throwaway locals, and all four properties plus
    ``generate_dummy_inputs`` mangled to the same name ``a`` so the base-class API
    (``inputs``/``num_layers``/``num_attention_heads``, used by this very body) never existed.
    """

    def __init__(self, config, task="default", patching_specs=None, use_past=False):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, 'pad_token_id', None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self):
        """Dynamic-axes description of the exported model inputs."""
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}
        return common_inputs

    @property
    def num_layers(self):
        return self._config.n_layer

    @property
    def num_attention_heads(self):
        return self._config.n_head

    def generate_dummy_inputs(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None):
        """Build dummy inputs ordered as the model's forward() expects them."""
        # Skip one level of super so the plain (no-past) dummy inputs are generated first.
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch

                batch, seqlen = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['past_key_values'] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs['attention_mask'] = common_inputs['attention_mask']
        if self.use_past:
            # Extend the mask to cover the past positions as well.
            mask_dtype = ordered_inputs['attention_mask'].dtype
            ordered_inputs['attention_mask'] = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1 )
        return ordered_inputs

    @property
    def default_onnx_opset(self):
        return 13
| 630 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="bert", choices=["bert"])
    parser.add_argument("--model_name", default="bert-base-uncased", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError("args.model_type should be \"bert\".")

    state_dict = model.state_dict()
    compressed_sd = {}

    # Fixes the original script, where every result was bound to a throwaway name so
    # `model`, `prefix` and `compressed_sd` were undefined and no keys were ever stored.
    # NOTE(review): the student-side key names below are reconstructed from the DistilBERT
    # layout -- verify them against the distillation student architecture before use.
    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[F'''distilbert.embeddings.{w}.weight'''] = state_dict[F'''{prefix}.embeddings.{w}.weight''']
    for w in ["weight", "bias"]:
        compressed_sd[F'''distilbert.embeddings.LayerNorm.{w}'''] = state_dict[F'''{prefix}.embeddings.LayerNorm.{w}''']

    # Transfer every other teacher layer into the 6-layer student.
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[F'''distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}'''] = state_dict[
                F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'''
            ]
            compressed_sd[F'''distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}'''] = state_dict[
                F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'''
            ]
            compressed_sd[F'''distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}'''] = state_dict[
                F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'''
            ]
            compressed_sd[F'''distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}'''] = state_dict[
                F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'''
            ]
            compressed_sd[F'''distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}'''] = state_dict[
                F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'''
            ]
            compressed_sd[F'''distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}'''] = state_dict[
                F'''{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'''
            ]
            compressed_sd[F'''distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}'''] = state_dict[
                F'''{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'''
            ]
            compressed_sd[F'''distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}'''] = state_dict[
                F'''{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'''
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[F'''vocab_transform.{w}'''] = state_dict[F'''cls.predictions.transform.dense.{w}''']
            compressed_sd[F'''vocab_layer_norm.{w}'''] = state_dict[F'''cls.predictions.transform.LayerNorm.{w}''']

    print(F'''N layers selected for distillation: {std_idx}''')
    print(F'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
    print(F'''Save transferred checkpoint to {args.dump_checkpoint}.''')
    torch.save(compressed_sd, args.dump_checkpoint)
| 715 |
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Submodule name -> public names, consumed by _LazyModule below.
# Fixes the original, where the dict and the modeling list were bound to the same
# throwaway name and the lazy module was assigned to a variable instead of sys.modules.
_import_structure = {
    "configuration_x_clip": [
        "XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XCLIPConfig",
        "XCLIPTextConfig",
        "XCLIPVisionConfig",
    ],
    "processing_x_clip": ["XCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_x_clip"] = [
        "XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XCLIPModel",
        "XCLIPPreTrainedModel",
        "XCLIPTextModel",
        "XCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_x_clip import (
        XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XCLIPConfig,
        XCLIPTextConfig,
        XCLIPVisionConfig,
    )
    from .processing_x_clip import XCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_x_clip import (
            XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            XCLIPModel,
            XCLIPPreTrainedModel,
            XCLIPTextModel,
            XCLIPVisionModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 630 | 0 |
'''simple docstring'''
from math import pi
def lowercase(radius, angle):
    """Return the arc length of a circle of ``radius`` subtended by ``angle`` degrees.

    Fixes the original, whose signature repeated one parameter name (SyntaxError) while
    the body referenced the undefined ``radius``/``angle``.
    """
    return 2 * pi * radius * (angle / 3_6_0)
if __name__ == "__main__":
    # Fixes the original, which called the nonexistent name `arc_length`.
    print(lowercase(90, 10))
| 716 |
'''simple docstring'''
import math
from datetime import datetime, timedelta
def lowercase(year):
    """Return the date of Easter for ``year`` using Gauss's computus.

    Fixes the original, where every intermediate was bound to a throwaway name so the
    variables used below (``metonic_cycle`` etc.) were undefined. The arithmetic --
    including the true division ``leap_day_inhibits / 4`` -- is reproduced exactly as
    written.
    """
    metonic_cycle = year % 1_9
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 1_0_0)
    lunar_orbit_correction = math.floor((1_3 + 8 * leap_day_inhibits) / 2_5)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        1_5 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 3_0
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (1_9 * metonic_cycle + secular_moon_shift) % 3_0
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 2_9 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 1_9)
    elif days_to_add == 2_8 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 1_8)
    else:
        return datetime(year, 3, 2_2) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday) )
if __name__ == "__main__":
    for year in (19_94, 20_00, 20_10, 20_21, 20_23):
        # Fixes the original, which printed the undefined names `tense` and `gauss_easter`.
        tense = "will be" if year > datetime.now().year else "was"
        print(F'''Easter in {year} {tense} {lowercase(year)}''')
| 630 | 0 |
'''simple docstring'''
import functools
def lowercase(worda, wordb):
    """Levenshtein edit distance between two strings via memoized top-down recursion.

    Fixes the original, whose signature repeated one parameter name (SyntaxError) and whose
    base cases/lengths were bound to throwaway names.
    """
    len_worda = len(worda)
    len_wordb = len(wordb)

    @functools.cache
    def min_distance(indexa, indexb) -> int:
        # if first word index is overflow - delete all remaining chars of the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if second word index is overflow - delete all remaining chars of the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb])  # current letters not identical
        return min(
            1 + min_distance(indexa + 1, indexb),
            1 + min_distance(indexa, indexb + 1),
            diff + min_distance(indexa + 1, indexb + 1), )

    return min_distance(0, 0)
if __name__ == "__main__":
    # Execute the module's embedded doctests when run as a script.
    import doctest
    doctest.testmod()
| 717 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class UpperCamelCase__(unittest.TestCase):
    """Unit tests for the greedy knapsack implementation.

    Fixes the original: all six methods shared the name ``a`` (so only the last was ever
    bound and none was discovered by unittest), and the ``assertRaisesRegex`` calls passed
    the undefined name ``snake_case__`` instead of an exception type.
    """

    def test_sorted(self):
        """calc_profit returns the maximal profit for the sample inputs."""
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        self.assertRaisesRegex(ValueError, 'max_weight must greater than zero.')

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, 'Weight can not be negative.')

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, 'Profit can not be negative.')

    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError, 'max_weight must greater than zero.')

    def test_unequal_list_length(self):
        # NOTE(review): IndexError matches the length-mismatch failure mode -- confirm
        # against greedy_knapsack's actual raise.
        self.assertRaisesRegex(
            IndexError, 'The length of profit and weight must be same.' )
if __name__ == "__main__":
    # Run the test suite when executed as a script.
    unittest.main()
| 630 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class UpperCamelCase__:
    """Dense row-major matrix supporting +, -, *, transpose and the Sherman-Morrison update.

    Fixes the original: assignment targets were collapsed onto throwaway names (so e.g.
    ``__setitem__`` and ``transpose`` never wrote a cell), parameters were duplicated
    (SyntaxError), and three methods were all named ``a`` -- restored to
    ``validate_indicies``/``transpose``/``sherman_morrison``, the names this class's own
    call sites use.
    """

    def __init__(self, row, column, default_value=0):
        """Create a ``row`` x ``column`` matrix filled with ``default_value``."""
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self):
        s = F'Matrix consist of {self.row} rows and {self.column} columns\n'
        # Make string identifier wide enough for the longest element
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = F'%{max_element_length}s'

        # Make string and return
        def single_line(row_vector) -> str:
            nonlocal string_format_identifier
            line = '['
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self):
        return str(self)

    def validate_indicies(self, loc):
        """True iff ``loc`` is a valid (row, column) pair inside this matrix."""
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc):
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc, value):
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another):
        assert isinstance(another, UpperCamelCase__)
        assert self.row == another.row and self.column == another.column
        # Add element-wise
        result = UpperCamelCase__(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self):
        result = UpperCamelCase__(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another):
        return self + (-another)

    def __mul__(self, another):
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = UpperCamelCase__(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, UpperCamelCase__):  # Matrix multiplication
            assert self.column == another.row
            result = UpperCamelCase__(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = F'Unsupported type given for another ({type(another)})'
            raise TypeError(msg)

    def transpose(self):
        result = UpperCamelCase__(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u, v):
        """Return (A + u v^T)^-1 given that ``self`` is A^-1; None if not invertible.

        u and v must be column vectors with the same number of rows as this square matrix.
        """
        assert isinstance(u, UpperCamelCase__) and isinstance(v, UpperCamelCase__)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":

    def test1():
        """Demo: identity inverse plus a Sherman-Morrison rank-1 update."""
        # a^(-1) is the 3x3 identity
        ainv = UpperCamelCase__(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f'a^(-1) is {ainv}')
        # u, v
        u = UpperCamelCase__(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = UpperCamelCase__(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f'u is {u}')
        print(f'v is {v}')
        print(f'uv^T is {u * v.transpose()}')
        # Sherman Morrison
        print(f'(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}')

    def test2():
        """Run the module doctests, then the demo (the original called the mangled name `testa`)."""
        import doctest
        doctest.testmod()
        test1()
| 718 |
'''simple docstring'''
def method_a(boundary, steps):
    """Trapezoidal-rule estimate of the integral of ``f`` over ``boundary`` = [a, b].

    Renamed from ``lowercase`` (four functions in this file shared that name); the caller
    in ``main`` already refers to ``method_a``. Also fixes the duplicated parameter name
    (SyntaxError) and the lost local assignments.
    """
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y
def make_points(a, b, h):
    """Yield the interior sample points a+h, a+2h, ... strictly below b-h.

    Renamed from ``lowercase`` to the name its caller (``method_a``) already uses; the
    original signature repeated one parameter name (SyntaxError).
    """
    x = a + h
    while x < (b - h):
        yield x
        x = x + h
def f(x):  # enter your function here
    """Integrand: f(x) = x^2. Renamed from ``lowercase`` to the name callers use."""
    y = (x - 0) * (x - 0)
    return y
def main():
    """Integrate f over [0, 1] with 10 trapezoids and print the estimate."""
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary, steps)
    print(f'y = {y}')


if __name__ == "__main__":
    main()
| 630 | 0 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
lowerCAmelCase : Tuple = r"""\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. 
Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"""
@add_start_docstrings(lowerCAmelCase)
class UpperCamelCase__(PretrainedConfig):
    """Composite RAG configuration wrapping a question-encoder config and a generator config.

    Fixes the original block: the decorator argument and base class were the undefined
    ``__snake_case``, the ``__init__`` signature repeated one parameter name 26 times
    (SyntaxError), every stored attribute was bound to a throwaway local, and the
    classmethod constructor and ``to_dict`` were both named ``a`` (clobbering each other
    and never overriding the base ``to_dict``).
    """

    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs, )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop('question_encoder')
        question_encoder_model_type = question_encoder_config.pop('model_type')
        decoder_config = kwargs.pop('generator')
        decoder_model_type = decoder_config.pop('model_type')

        # Local import to avoid a circular dependency with the auto-config registry.
        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache
        if self.forced_eos_token_id is None:
            # Fall back to the generator's forced EOS when none was set on the composite.
            self.forced_eos_token_id = getattr(self.generator, 'forced_eos_token_id', None)

    @classmethod
    def from_question_encoder_generator_configs(cls, question_encoder_config, generator_config, **kwargs):
        """Instantiate from two already-built sub-configurations."""
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize, expanding the nested sub-configs and recording the composite model_type."""
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 719 |
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)

# Submodule name -> public names, consumed by _LazyModule below.
# Fixes the original, where both structures were bound to the same throwaway name and
# the lazy module was never installed into sys.modules.
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 630 | 0 |
'''simple docstring'''
def valid_coloring(neighbours, colored_vertices, color):
    """Return True if no already-colored neighbour of this vertex uses ``color``.

    ``neighbours`` is one adjacency-matrix row (1 = edge); ``colored_vertices``
    holds the current color per vertex (-1 = uncolored).
    """
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours) )


def util_color(graph, max_colors, colored_vertices, index):
    """Backtracking helper: try to color vertices from ``index`` onward.

    Mutates ``colored_vertices`` in place; returns True on success.
    """
    if index == len(graph):
        return True
    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph, max_colors):
    """Return a valid coloring (list of color indices) or [] if impossible."""
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
| 720 |
'''simple docstring'''
from collections import Counter
from timeit import timeit
def lowercase (_A = "" , ):
"""simple docstring"""
return sum(c % 2 for c in Counter(input_str.replace(' ' , '' ).lower() ).values() ) < 2
def lowercase (_A = "" ):
"""simple docstring"""
if len(_A ) == 0:
return True
_lowerCAmelCase : Union[str, Any] = input_str.replace(' ' , '' ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
_lowerCAmelCase : dict[str, int] = {}
for character in lower_case_input_str:
_lowerCAmelCase : Union[str, Any] = character_freq_dict.get(_A , 0 ) + 1
_lowerCAmelCase : List[Any] = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def lowercase (_A = "" ):
"""simple docstring"""
print('\nFor string = ' , _A , ':' )
print(
'> can_string_be_rearranged_as_palindrome_counter()' , '\tans =' , can_string_be_rearranged_as_palindrome_counter(_A ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome_counter(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
print(
'> can_string_be_rearranged_as_palindrome()' , '\tans =' , can_string_be_rearranged_as_palindrome(_A ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
if __name__ == "__main__":
    # Read the string, benchmark both implementations, then report the answer.
    check_str = input(
        'Enter string to determine if it can be rearranged as a palindrome or not: '
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f'{check_str} can {"" if status else "not "}be rearranged as a palindrome')
| 630 | 0 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class UpperCamelCase__ ( ProcessorMixin ):
    """Processor bundling a BLIP image processor, a language tokenizer and a
    Q-Former tokenizer (InstructBLIP-style).

    The Q-Former tokenizer is not a standard ``ProcessorMixin`` attribute, so it
    is saved/loaded from a ``qformer_tokenizer`` subfolder explicitly.
    """

    # ProcessorMixin bookkeeping (the original clobbered all three into one name).
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'BlipImageProcessor'
    tokenizer_class = 'AutoTokenizer'

    def __init__( self , image_processor , tokenizer , qformer_tokenizer ):
        super().__init__(image_processor , tokenizer )
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__( self , images = None , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_token_type_ids = False , return_length = False , verbose = True , return_tensors = None , **kwargs ):
        """Tokenize ``text`` with both tokenizers and preprocess ``images``.

        Q-Former encodings are stored under ``qformer_input_ids`` /
        ``qformer_attention_mask`` so they don't collide with the main tokenizer.
        """
        if images is None and text is None:
            raise ValueError('You have to specify at least images or text.' )
        encoding = BatchFeature()
        if text is not None:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            encoding.update(text_encoding )
            qformer_text_encoding = self.qformer_tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            encoding['qformer_input_ids'] = qformer_text_encoding.pop('input_ids' )
            encoding['qformer_attention_mask'] = qformer_text_encoding.pop('attention_mask' )
        if images is not None:
            image_encoding = self.image_processor(images , return_tensors=return_tensors )
            encoding.update(image_encoding )
        return encoding

    def batch_decode( self , *args , **kwargs ):
        """Forward to the main tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        """Forward to the main tokenizer's ``decode``."""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        # dict.fromkeys preserves order while removing duplicates.
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    def save_pretrained( self , save_directory , **kwargs ):
        """Save all sub-processors; the Q-Former tokenizer goes to a subfolder."""
        if os.path.isfile(save_directory ):
            raise ValueError(F'Provided path ({save_directory}) should be a directory, not a file' )
        os.makedirs(save_directory , exist_ok=True )
        qformer_tokenizer_path = os.path.join(save_directory , 'qformer_tokenizer' )
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path )
        return super().save_pretrained(save_directory , **kwargs )

    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        """Load the standard attributes, then the Q-Former tokenizer subfolder."""
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path , subfolder='qformer_tokenizer' )
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path , **kwargs )
        args.append(qformer_tokenizer )
        return cls(*args )
| 721 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase : str = logging.get_logger(__name__)
lowerCAmelCase : int = {
"""facebook/data2vec-text-base""": """https://huggingface.co/data2vec/resolve/main/config.json""",
}
class UpperCamelCase__ ( PretrainedConfig ):
    """Configuration for a Data2Vec text model (RoBERTa-style hyperparameters)."""

    # Required by PretrainedConfig machinery for auto-class resolution.
    model_type = "data2vec-text"

    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class UpperCamelCase__ ( OnnxConfig ):
    """ONNX export configuration: declares the dynamic input axes for the model."""

    @property
    def inputs( self ):
        # Multiple-choice inputs carry an extra `choice` axis between batch and sequence.
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
| 630 | 0 |
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def lowercase(inductance: float, capacitance: float) -> tuple[str, float]:
    """Return ("Resonant frequency", f) for an LC circuit: f = 1 / (2*pi*sqrt(L*C)).

    Raises ValueError if either inductance or capacitance is zero or negative.
    """
    if inductance <= 0:
        raise ValueError('Inductance cannot be 0 or negative' )
    elif capacitance <= 0:
        raise ValueError('Capacitance cannot be 0 or negative' )
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 700 |
'''simple docstring'''
import pytest
import datasets
# Import fixture modules as plugins
lowerCAmelCase : List[str] = ["""tests.fixtures.files""", """tests.fixtures.hub""", """tests.fixtures.fsspec"""]
def pytest_collection_modifyitems(config, items):
    """Pytest hook: mark every test without an 'integration'/'unit' marker as unit."""
    for item in items:
        if any(marker in item.keywords for marker in ['integration', 'unit'] ):
            continue
        item.add_marker(pytest.mark.unit )
def pytest_configure(config):
    """Pytest hook: register the custom `torchaudio_latest` marker."""
    config.addinivalue_line('markers' , 'torchaudio_latest: mark test to run with torchaudio>=0.12' )
@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    """Redirect every datasets cache path into a per-session temporary directory."""
    test_hf_cache_home = tmp_path_factory.getbasetemp() / 'cache'
    test_hf_datasets_cache = test_hf_cache_home / 'datasets'
    test_hf_metrics_cache = test_hf_cache_home / 'metrics'
    test_hf_modules_cache = test_hf_cache_home / 'modules'
    monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE' , str(test_hf_datasets_cache ) )
    monkeypatch.setattr('datasets.config.HF_METRICS_CACHE' , str(test_hf_metrics_cache ) )
    monkeypatch.setattr('datasets.config.HF_MODULES_CACHE' , str(test_hf_modules_cache ) )
    test_downloaded_datasets_path = test_hf_datasets_cache / 'downloads'
    monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH' , str(test_downloaded_datasets_path ) )
    test_extracted_datasets_path = test_hf_datasets_cache / 'downloads' / 'extracted'
    monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(test_extracted_datasets_path ) )
@pytest.fixture(autouse=True , scope='session' )
def disable_tqdm_output():
    """Silence datasets progress bars for the whole test session."""
    datasets.disable_progress_bar()
@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    """Never report download counts to the Hub from tests."""
    monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS' , False )
@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    """Silence SQLAlchemy 2.0 deprecation warnings in tests that opt in."""
    monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING' , True )
| 630 | 0 |
from math import factorial, pi
def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    """Approximate sin(theta) with ``accuracy`` terms of the Maclaurin series.

    Raises ValueError on non-numeric theta or a non-positive-int accuracy.
    """
    if not isinstance(theta , (int, float) ):
        raise ValueError('maclaurin_sin() requires either an int or float for theta' )
    if not isinstance(accuracy , int ) or accuracy <= 0:
        raise ValueError('maclaurin_sin() requires a positive int for accuracy' )
    theta = float(theta )
    # Reduce theta into one period so the series converges quickly.
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(accuracy ) )
def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    """Approximate cos(theta) with ``accuracy`` terms of the Maclaurin series.

    Raises ValueError on non-numeric theta or a non-positive-int accuracy.
    """
    if not isinstance(theta , (int, float) ):
        raise ValueError('maclaurin_cos() requires either an int or float for theta' )
    if not isinstance(accuracy , int ) or accuracy <= 0:
        raise ValueError('maclaurin_cos() requires a positive int for accuracy' )
    theta = float(theta )
    # Reduce theta into one period so the series converges quickly.
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(accuracy ) )
if __name__ == "__main__":
    # Run the docstring examples, then print sample approximations
    # (second argument is the number of series terms).
    import doctest
    doctest.testmod()
    print(maclaurin_sin(10))
    print(maclaurin_sin(-10))
    print(maclaurin_sin(10, 15))
    print(maclaurin_sin(-10, 15))
    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(10, 15))
    print(maclaurin_cos(-10, 15))
| 701 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    """Convolution + batch-norm + activation block used throughout RegNet."""

    def __init__( self , out_channels , kernel_size = 3 , stride = 1 , groups = 1 , activation = "relu" , **kwargs , ):
        super().__init__(**kwargs )
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2 )
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels , kernel_size=kernel_size , strides=stride , padding='VALID' , groups=groups , use_bias=False , name='convolution' , )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' )
        self.activation = ACTaFN[activation] if activation is not None else tf.identity

    def call( self , hidden_state ):
        hidden_state = self.convolution(self.padding(hidden_state ) )
        hidden_state = self.normalization(hidden_state )
        hidden_state = self.activation(hidden_state )
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    """Stem: validates channel count and embeds NCHW pixel values."""

    def __init__( self , config , **kwargs ):
        super().__init__(**kwargs )
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='embedder' , )

    def call( self , pixel_values ):
        num_channels = shape_list(pixel_values )[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values , perm=(0, 2, 3, 1) )
        hidden_state = self.embedder(pixel_values )
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
    """1x1 conv + batch-norm shortcut to match channels/stride on the residual path."""

    def __init__( self , out_channels , stride = 2 , **kwargs ):
        super().__init__(**kwargs )
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels , kernel_size=1 , strides=stride , use_bias=False , name='convolution' )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' )

    def call( self , inputs , training = False ):
        return self.normalization(self.convolution(inputs ) , training=training )
class TFRegNetSELayer(tf.keras.layers.Layer):
    """Squeeze-and-excite: global pool, bottleneck attention, channel-wise rescale."""

    def __init__( self , in_channels , reduced_channels , **kwargs ):
        super().__init__(**kwargs )
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True , name='pooler' )
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels , kernel_size=1 , activation='relu' , name='attention.0' ),
            tf.keras.layers.Conv2D(filters=in_channels , kernel_size=1 , activation='sigmoid' , name='attention.2' ),
        ]

    def call( self , hidden_state ):
        pooled = self.pooler(hidden_state )
        for layer_module in self.attention:
            pooled = layer_module(pooled )
        hidden_state = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
    """RegNet X residual block: 1x1 -> grouped 3x3 -> 1x1 convs plus shortcut."""

    def __init__( self , config , in_channels , out_channels , stride = 1 , **kwargs ):
        super().__init__(**kwargs )
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1 , out_channels // config.groups_width )
        self.shortcut = (
            TFRegNetShortCut(out_channels , stride=stride , name='shortcut' )
            if should_apply_shortcut
            else tf.keras.layers.Activation('linear' , name='shortcut' )
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
            TFRegNetConvLayer(
                out_channels , stride=stride , groups=groups , activation=config.hidden_act , name='layer.1' ),
            TFRegNetConvLayer(out_channels , kernel_size=1 , activation=None , name='layer.2' ),
        ]
        self.activation = ACTaFN[config.hidden_act]

    def call( self , hidden_state ):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
    """RegNet Y residual block: X block plus a squeeze-and-excite layer."""

    def __init__( self , config , in_channels , out_channels , stride = 1 , **kwargs ):
        super().__init__(**kwargs )
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1 , out_channels // config.groups_width )
        self.shortcut = (
            TFRegNetShortCut(out_channels , stride=stride , name='shortcut' )
            if should_apply_shortcut
            else tf.keras.layers.Activation('linear' , name='shortcut' )
        )
        self.layers = [
            TFRegNetConvLayer(out_channels , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
            TFRegNetConvLayer(
                out_channels , stride=stride , groups=groups , activation=config.hidden_act , name='layer.1' ),
            TFRegNetSELayer(out_channels , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ),
            TFRegNetConvLayer(out_channels , kernel_size=1 , activation=None , name='layer.3' ),
        ]
        self.activation = ACTaFN[config.hidden_act]

    def call( self , hidden_state ):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    """One RegNet stage: a downsampling block followed by depth-1 identical blocks."""

    def __init__( self , config , in_channels , out_channels , stride = 2 , depth = 2 , **kwargs ):
        super().__init__(**kwargs )
        layer = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config , in_channels , out_channels , stride=stride , name='layers.0' ),
            *[layer(config , out_channels , out_channels , name=F'layers.{i+1}' ) for i in range(depth - 1 )],
        ]

    def call( self , hidden_state ):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state )
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
    """Stack of RegNet stages; optionally collects hidden states per stage."""

    def __init__( self , config , **kwargs ):
        super().__init__(**kwargs )
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) )
        in_out_channels = zip(config.hidden_sizes , config.hidden_sizes[1:] )
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels , config.depths[1:] ) ):
            self.stages.append(TFRegNetStage(config , in_channels , out_channels , depth=depth , name=F'stages.{i+1}' ) )

    def call( self , hidden_state , output_hidden_states = False , return_dict = True ):
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state )
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None )
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state , hidden_states=hidden_states )
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    """Embeddings + encoder + pooler; outputs are transposed back to NCHW."""

    config_class = RegNetConfig

    def __init__( self , config , **kwargs ):
        super().__init__(**kwargs )
        self.config = config
        self.embedder = TFRegNetEmbeddings(config , name='embedder' )
        self.encoder = TFRegNetEncoder(config , name='encoder' )
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True , name='pooler' )

    @unpack_inputs
    def call( self , pixel_values , output_hidden_states = None , return_dict = None , training = False , ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values , training=training )
        encoder_outputs = self.encoder(
            embedding_output , output_hidden_states=output_hidden_states , return_dict=return_dict , training=training )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state )
        # Change to NCHW output format have uniformity in the modules
        last_hidden_state = tf.transpose(last_hidden_state , perm=(0, 3, 1, 2) )
        pooled_output = tf.transpose(pooled_output , perm=(0, 3, 1, 2) )
        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state , pooler_output=pooled_output , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    """Base class wiring RegNet models into the transformers save/load machinery."""

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature( self ):
        # Serving signature: NCHW float32 pixel values with a dynamic batch dim.
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.float32 )}
# Shared docstring fragments consumed by the @add_start_docstrings* decorators below.
REGNET_START_DOCSTRING = r"""
    Parameters:
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConveNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top." , REGNET_START_DOCSTRING , )
class TFRegNetModel(TFRegNetPreTrainedModel):
    """Headless RegNet backbone returning last hidden state and pooled output."""

    def __init__( self , config , *inputs , **kwargs ):
        super().__init__(config , *inputs , **kwargs )
        self.regnet = TFRegNetMainLayer(config , name='regnet' )

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=TFBaseModelOutputWithPoolingAndNoAttention , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def call( self , pixel_values , output_hidden_states = None , return_dict = None , training = False , ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values=pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict , training=training , )
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
    "\n    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    " , REGNET_START_DOCSTRING , )
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    """RegNet backbone + flatten/dense classification head."""

    def __init__( self , config , *inputs , **kwargs ):
        super().__init__(config , *inputs , **kwargs )
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config , name='regnet' )
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=TFSequenceClassifierOutput , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def call( self , pixel_values = None , labels = None , output_hidden_states = None , return_dict = None , training = False , ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict , training=training )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        flattened_output = self.classifier[0](pooled_output )
        logits = self.classifier[1](flattened_output )
        loss = None if labels is None else self.hf_compute_loss(labels=labels , logits=logits )
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=loss , logits=logits , hidden_states=outputs.hidden_states )
| 630 | 0 |
'''simple docstring'''
from itertools import permutations
def is_substring_divisible(num):
    """Check the Project Euler 43 property on a 0-9 pandigital digit tuple.

    d2d3d4 % 2 == 0, d3d4d5 % 3 == 0, d6 % 5 == 0, and each following
    3-digit window divisible by 7, 11, 13, 17 respectively.
    """
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n=10):
    """Sum all pandigital numbers over digits 0..n-1 with the property above.

    NOTE: the divisibility windows assume n == 10 (indices up to num[9]).
    """
    return sum(
        int(''.join(map(str, num ) ) )
        for num in permutations(range(n ) )
        if is_substring_divisible(num ) )


if __name__ == "__main__":
    print(F'{solution() = }')
| 702 |
'''simple docstring'''
from typing import Any
def lowercase(input_list):
    """Return the sorted mode(s) of ``input_list`` ([] for empty input)."""
    if not input_list:
        return []
    # count of each value, positionally aligned with input_list
    counts = [input_list.count(value) for value in input_list]
    max_count = max(counts)
    # All values whose count equals the maximum, deduplicated and sorted.
    return sorted({input_list[i] for i, count in enumerate(counts) if count == max_count})


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 630 | 0 |
'''simple docstring'''
def hubble_parameter(hubble_constant, radiation_density, matter_density, dark_energy, redshift):
    """Return the Hubble parameter H(z) for a flat-or-curved FLRW universe.

    Curvature density is inferred as 1 - (matter + radiation + dark energy).
    Raises ValueError for negative inputs or relative densities above one.
    """
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError('All input parameters must be positive')
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError('Relative densities cannot be greater than one')
    curvature = 1 - (matter_density + radiation_density + dark_energy)
    e_2 = (
        radiation_density * (redshift + 1) ** 4
        + matter_density * (redshift + 1) ** 3
        + curvature * (redshift + 1) ** 2
        + dark_energy
    )
    return hubble_constant * e_2 ** (1 / 2)


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    matter_density = 0.3
    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
| 703 |
'''simple docstring'''
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 630 | 0 |
'''simple docstring'''
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__ ( __A ):
"""simple docstring"""
__magic_name__ = (KDPMaDiscreteScheduler,)
__magic_name__ = 1_0
def get_scheduler_config( self , **kwargs ):
    """Return the default KDPM2 scheduler config, with kwargs overriding defaults.

    (Renamed from the corrupted `a`: the other test methods already call
    `self.get_scheduler_config`.)
    """
    config = {
        'num_train_timesteps': 1100,
        'beta_start': 0.0001,
        'beta_end': 0.02,
        'beta_schedule': 'linear',
    }
    config.update(**kwargs )
    return config
def a ( self ):
    """Smoke-test scheduler construction over several training-timestep counts."""
    # NOTE(review): sibling methods also named `a` shadow each other; they likely
    # should carry distinct test_* names — confirm against the upstream test file.
    for timesteps in [10, 50, 100, 1000]:
        self.check_over_configs(num_train_timesteps=timesteps )
def a ( self ):
    """Smoke-test scheduler construction over several (beta_start, beta_end) pairs."""
    for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
        self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
def a ( self ):
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=snake_case__ )
def a ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCAmelCase : Any = self.get_scheduler_config(prediction_type='v_prediction' )
_lowerCAmelCase : Union[str, Any] = scheduler_class(**snake_case__ )
scheduler.set_timesteps(self.num_inference_steps )
_lowerCAmelCase : List[str] = self.dummy_model()
_lowerCAmelCase : str = self.dummy_sample_deter * scheduler.init_noise_sigma
_lowerCAmelCase : Any = sample.to(snake_case__ )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : Dict = scheduler.scale_model_input(snake_case__ , snake_case__ )
_lowerCAmelCase : Optional[Any] = model(snake_case__ , snake_case__ )
_lowerCAmelCase : Optional[Any] = scheduler.step(snake_case__ , snake_case__ , snake_case__ )
_lowerCAmelCase : List[Any] = output.prev_sample
_lowerCAmelCase : Tuple = torch.sum(torch.abs(snake_case__ ) )
_lowerCAmelCase : Tuple = torch.mean(torch.abs(snake_case__ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6934E-07 ) < 1E-2
assert abs(result_mean.item() - 6.1112E-10 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 4.693428650170972E-07 ) < 1E-2
assert abs(result_mean.item() - 0.0002 ) < 1E-3
def a ( self ):
'''simple docstring'''
if torch_device == "mps":
return
_lowerCAmelCase : Any = self.scheduler_classes[0]
_lowerCAmelCase : List[str] = self.get_scheduler_config()
_lowerCAmelCase : str = scheduler_class(**snake_case__ )
scheduler.set_timesteps(self.num_inference_steps )
_lowerCAmelCase : Tuple = self.dummy_model()
_lowerCAmelCase : Optional[int] = self.dummy_sample_deter * scheduler.init_noise_sigma
_lowerCAmelCase : Tuple = sample.to(snake_case__ )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : Dict = scheduler.scale_model_input(snake_case__ , snake_case__ )
_lowerCAmelCase : Any = model(snake_case__ , snake_case__ )
_lowerCAmelCase : Tuple = scheduler.step(snake_case__ , snake_case__ , snake_case__ )
_lowerCAmelCase : str = output.prev_sample
_lowerCAmelCase : Any = torch.sum(torch.abs(snake_case__ ) )
_lowerCAmelCase : List[Any] = torch.mean(torch.abs(snake_case__ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
def a ( self ):
'''simple docstring'''
if torch_device == "mps":
return
_lowerCAmelCase : Any = self.scheduler_classes[0]
_lowerCAmelCase : List[Any] = self.get_scheduler_config()
_lowerCAmelCase : Any = scheduler_class(**snake_case__ )
scheduler.set_timesteps(self.num_inference_steps , device=snake_case__ )
_lowerCAmelCase : List[str] = self.dummy_model()
_lowerCAmelCase : List[str] = self.dummy_sample_deter.to(snake_case__ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
_lowerCAmelCase : Dict = scheduler.scale_model_input(snake_case__ , snake_case__ )
_lowerCAmelCase : str = model(snake_case__ , snake_case__ )
_lowerCAmelCase : Union[str, Any] = scheduler.step(snake_case__ , snake_case__ , snake_case__ )
_lowerCAmelCase : Optional[Any] = output.prev_sample
_lowerCAmelCase : Tuple = torch.sum(torch.abs(snake_case__ ) )
_lowerCAmelCase : Any = torch.mean(torch.abs(snake_case__ ) )
if str(snake_case__ ).startswith('cpu' ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
| 704 |
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# Module logger for the GPT-SW3 tokenizer.
lowerCAmelCase : int = logging.get_logger(__name__)
# NOTE(review): the three module constants below all share the obfuscated name
# ``lowerCAmelCase``, so each assignment clobbers the previous one (including
# the logger above). They look like they should be ``VOCAB_FILES_NAMES``,
# ``PRETRAINED_VOCAB_FILES_MAP`` and
# ``PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`` — the names the tokenizer class
# below reads — TODO confirm.
# Name of the SentencePiece model file inside a checkpoint directory.
lowerCAmelCase : Union[str, Any] = {"""vocab_file""": """spiece.model"""}
# Download URL of the SentencePiece vocab for each released GPT-SW3 size.
lowerCAmelCase : Optional[int] = {
    """vocab_file""": {
        """AI-Sweden/gpt-sw3-126m""": """https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model""",
        """AI-Sweden/gpt-sw3-350m""": """https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model""",
        """AI-Sweden/gpt-sw3-1.6b""": """https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model""",
        """AI-Sweden/gpt-sw3-6.7b""": """https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model""",
        """AI-Sweden/gpt-sw3-20b""": """https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model""",
    }
}
# Maximum model input length (positional embeddings) per checkpoint.
lowerCAmelCase : Union[str, Any] = {
    """AI-Sweden/gpt-sw3-126m""": 20_48,
    """AI-Sweden/gpt-sw3-350m""": 20_48,
    """AI-Sweden/gpt-sw3-1.6b""": 20_48,
    """AI-Sweden/gpt-sw3-6.7b""": 20_48,
    """AI-Sweden/gpt-sw3-20b""": 20_48,
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """SentencePiece tokenizer for the GPT-SW3 family of models.

    NOTE(review): this class carries heavy obfuscation damage — values are
    bound to ``_lowerCAmelCase`` but read back under other names
    (``name_or_path``, ``eos_token``, ``state``, ``text``, ``tokens``, ...);
    ``__init__`` declares several parameters that all share the name
    ``snake_case__`` (duplicate argument names are a SyntaxError); and the base
    ``SCREAMING_SNAKE_CASE_`` is undefined here (presumably
    ``PreTrainedTokenizer``, imported above) — TODO confirm before use.
    """

    # NOTE(review): four class attributes share the name ``__magic_name__``;
    # each assignment clobbers the previous one. They look like
    # ``vocab_files_names``, ``pretrained_vocab_files_map``,
    # ``max_model_input_sizes`` and ``model_input_names``.
    __magic_name__ = VOCAB_FILES_NAMES
    __magic_name__ = PRETRAINED_VOCAB_FILES_MAP
    __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __magic_name__ = ["input_ids", "attention_mask"]
    def __init__( self , snake_case__ , snake_case__=False , snake_case__=False , snake_case__=False , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__ = None , **snake_case__ , ):
        '''Load the SentencePiece model and configure the special tokens.'''
        _lowerCAmelCase : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
        _lowerCAmelCase : List[Any] = kwargs.get('name_or_path' )
        if name_or_path is None:
            logger.warning(
                'name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'
                ' you are testing the model, this can safely be ignored' )
            _lowerCAmelCase : Any = 'None'
        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        _lowerCAmelCase : str = '<|endoftext|>' if eos_token is None else eos_token
        _lowerCAmelCase : Tuple = '<unk>' if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            # The 7b checkpoint reuses unk/eos as pad/bos.
            _lowerCAmelCase : List[str] = unk_token if pad_token is None else pad_token
            _lowerCAmelCase : Optional[int] = eos_token if bos_token is None else bos_token
        else:
            _lowerCAmelCase : Tuple = '<pad>' if pad_token is None else pad_token
            _lowerCAmelCase : Union[str, Any] = '<s>' if bos_token is None else bos_token
        super().__init__(
            do_lower_case=snake_case__ , remove_space=snake_case__ , keep_accents=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , )
        _lowerCAmelCase : Union[str, Any] = do_lower_case
        _lowerCAmelCase : Optional[int] = remove_space
        _lowerCAmelCase : Any = keep_accents
        _lowerCAmelCase : Optional[int] = vocab_file
        _lowerCAmelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(snake_case__ )
        # Used for whitespace normalization in input texts
        # fmt : off
        _lowerCAmelCase : Optional[Any] = {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '', ''}
        # fmt : on
        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        _lowerCAmelCase : Optional[Any] = re.compile(
            F'[{"".join(map(snake_case__ , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]' )
    def __getstate__( self ):
        '''Drop the unpicklable SentencePiece processor from the pickled state.'''
        _lowerCAmelCase : List[str] = self.__dict__.copy()
        _lowerCAmelCase : int = None
        return state
    def __setstate__( self , snake_case__ ):
        '''Restore pickled state and re-load the SentencePiece processor.'''
        _lowerCAmelCase : Any = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            _lowerCAmelCase : int = {}
        _lowerCAmelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def a ( self ):
        '''Size of the SentencePiece vocabulary.'''
        return len(self.sp_model )
    def a ( self , snake_case__ ):
        '''Strip non-printing chars, normalize whitespace, then NFC-normalize.'''
        _lowerCAmelCase : Optional[int] = self.non_printing_characters_re.sub('' , snake_case__ )
        # Normalize whitespaces
        _lowerCAmelCase : Tuple = ''.join([char if char not in self.whitespaces else ' ' for char in text] )
        # NFC Unicode normalization
        _lowerCAmelCase : Union[str, Any] = unicodedata.normalize('NFC' , snake_case__ )
        return text
    def a ( self , snake_case__ , **snake_case__ ):
        '''Tokenize a preprocessed string into SentencePiece pieces.'''
        _lowerCAmelCase : str = self.preprocess_text(snake_case__ )
        return self.sp_model.encode(snake_case__ , out_type=snake_case__ )
    def a ( self , snake_case__ ):
        '''Convert a token (piece) to its vocabulary id.'''
        return self.sp_model.PieceToId(snake_case__ )
    def a ( self , snake_case__ ):
        '''Convert a vocabulary id back to its token (piece).'''
        return self.sp_model.IdToPiece(snake_case__ )
    @staticmethod
    def a ( snake_case__ ):
        '''Identity post-processing of an already-decoded string.'''
        return out_string
    def a ( self , snake_case__ ):
        '''Join sub-tokens into a string, decoding around special tokens.'''
        _lowerCAmelCase : int = []
        _lowerCAmelCase : Optional[Any] = ''
        _lowerCAmelCase : Tuple = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(snake_case__ ) + token
                _lowerCAmelCase : Union[str, Any] = True
                _lowerCAmelCase : List[Any] = []
            else:
                current_sub_tokens.append(snake_case__ )
                _lowerCAmelCase : List[Any] = False
        out_string += self.sp_model.decode(snake_case__ )
        return out_string
    def a ( self ):
        '''Return the full vocab (token -> id), including added tokens.'''
        _lowerCAmelCase : Union[str, Any] = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def a ( self , snake_case__ , snake_case__ = None ):
        '''Save the SentencePiece model file into the given directory.'''
        if not os.path.isdir(snake_case__ ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        _lowerCAmelCase : int = os.path.join(
            snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , snake_case__ )
        elif not os.path.isfile(self.vocab_file ):
            with open(snake_case__ , 'wb' ) as fi:
                _lowerCAmelCase : Any = self.sp_model.serialized_model_proto()
                fi.write(snake_case__ )
        return (out_vocab_file,)
    def a ( self , snake_case__ , snake_case__ = False ):
        '''Encode text (or a list of texts) to ids; optionally a torch tensor.'''
        if isinstance(snake_case__ , snake_case__ ):
            _lowerCAmelCase : Optional[Any] = self.preprocess_text(snake_case__ )
            _lowerCAmelCase : List[str] = self.sp_model.encode(snake_case__ )
        else:
            _lowerCAmelCase : Tuple = [self.preprocess_text(snake_case__ ) for t in text]
            _lowerCAmelCase : List[str] = self.sp_model.encode(snake_case__ )
        if return_tensors is True or return_tensors == "pt":
            _lowerCAmelCase : int = torch.tensor(snake_case__ )
        return token_ids
    def a ( self , snake_case__ ):
        '''Decode a sequence of ids back to text with SentencePiece.'''
        return self.sp_model.decode(snake_case__ )
    def a ( self , snake_case__ ):
        '''Build a chat prompt from a Conversation and encode it.'''
        _lowerCAmelCase : Optional[int] = [F'User: {text}' if is_user else F'Bot: {text}' for is_user, text in conversation.iter_texts()]
        _lowerCAmelCase : str = (
            F'{self.eos_token}{self.bos_token}' + F'{self.bos_token}'.join(snake_case__ ) + F'{self.bos_token}Bot:'
        )
        return self.encode(text=snake_case__ )
| 630 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
# Make torch/cuDNN fully deterministic so the exact image-slice assertions in
# the tests below are reproducible across runs.
enable_full_determinism()
@skip_mps
class UpperCamelCase__ ( __lowercase , __lowercase , unittest.TestCase ):
    """Fast (CPU, tiny-model) tests for ``StableDiffusionPanoramaPipeline``.

    NOTE(review): values are bound to ``_lowerCAmelCase`` but read back under
    other names, and many calls pass ``_A``, which is undefined in these
    scopes — as written these tests raise ``NameError``. The two
    ``__lowercase`` mixin bases are also undefined; presumably
    ``PipelineLatentTesterMixin`` and ``PipelineTesterMixin`` (imported
    above) — TODO confirm.
    """

    # NOTE(review): all five class attributes share the name
    # ``__magic_name__``; each assignment clobbers the previous one.
    __magic_name__ = StableDiffusionPanoramaPipeline
    __magic_name__ = TEXT_TO_IMAGE_PARAMS
    __magic_name__ = TEXT_TO_IMAGE_BATCH_PARAMS
    __magic_name__ = TEXT_TO_IMAGE_IMAGE_PARAMS
    __magic_name__ = TEXT_TO_IMAGE_IMAGE_PARAMS
    def a ( self ):
        '''Build tiny unet/vae/CLIP components for fast, seeded tests.'''
        torch.manual_seed(0 )
        _lowerCAmelCase : Any = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        _lowerCAmelCase : Dict = DDIMScheduler()
        torch.manual_seed(0 )
        _lowerCAmelCase : str = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        torch.manual_seed(0 )
        _lowerCAmelCase : Union[str, Any] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        _lowerCAmelCase : int = CLIPTextModel(_A )
        _lowerCAmelCase : List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        _lowerCAmelCase : Optional[int] = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def a ( self , snake_case__ , snake_case__=0 ):
        '''Deterministic default pipeline kwargs for the fast tests.'''
        _lowerCAmelCase : List[str] = torch.manual_seed(_A )
        _lowerCAmelCase : Dict = {
            'prompt': 'a photo of the dolomites',
            'generator': generator,
            # Setting height and width to None to prevent OOMs on CPU.
            'height': None,
            'width': None,
            'num_inference_steps': 1,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs
    def a ( self ):
        '''Smoke-test the default pipeline output against a known slice.'''
        _lowerCAmelCase : Any = 'cpu' # ensure determinism for the device-dependent torch.Generator
        _lowerCAmelCase : Optional[Any] = self.get_dummy_components()
        _lowerCAmelCase : Union[str, Any] = StableDiffusionPanoramaPipeline(**_A )
        _lowerCAmelCase : List[Any] = sd_pipe.to(_A )
        sd_pipe.set_progress_bar_config(disable=_A )
        _lowerCAmelCase : List[Any] = self.get_dummy_inputs(_A )
        _lowerCAmelCase : str = sd_pipe(**_A ).images
        _lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        _lowerCAmelCase : int = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def a ( self ):
        '''Batch-consistency check from the common pipeline test mixin.'''
        super().test_inference_batch_consistent(batch_sizes=[1, 2] )
    def a ( self ):
        '''Single-vs-batched identity check from the common test mixin.'''
        super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25E-3 )
    def a ( self ):
        '''Run with a negative prompt and compare against a known slice.'''
        _lowerCAmelCase : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
        _lowerCAmelCase : Union[str, Any] = self.get_dummy_components()
        _lowerCAmelCase : Optional[Any] = StableDiffusionPanoramaPipeline(**_A )
        _lowerCAmelCase : str = sd_pipe.to(_A )
        sd_pipe.set_progress_bar_config(disable=_A )
        _lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs(_A )
        _lowerCAmelCase : Optional[Any] = 'french fries'
        _lowerCAmelCase : Optional[int] = sd_pipe(**_A , negative_prompt=_A )
        _lowerCAmelCase : Optional[Any] = output.images
        _lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        _lowerCAmelCase : Optional[Any] = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def a ( self ):
        '''Run with view_batch_size=2 and compare against a known slice.'''
        _lowerCAmelCase : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
        _lowerCAmelCase : Tuple = self.get_dummy_components()
        _lowerCAmelCase : str = StableDiffusionPanoramaPipeline(**_A )
        _lowerCAmelCase : Union[str, Any] = sd_pipe.to(_A )
        sd_pipe.set_progress_bar_config(disable=_A )
        _lowerCAmelCase : List[Any] = self.get_dummy_inputs(_A )
        _lowerCAmelCase : List[Any] = sd_pipe(**_A , view_batch_size=2 )
        _lowerCAmelCase : Any = output.images
        _lowerCAmelCase : str = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        _lowerCAmelCase : Optional[Any] = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def a ( self ):
        '''Swap in Euler-ancestral scheduler and compare against a known slice.'''
        _lowerCAmelCase : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
        _lowerCAmelCase : List[str] = self.get_dummy_components()
        _lowerCAmelCase : str = EulerAncestralDiscreteScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' )
        _lowerCAmelCase : Optional[Any] = StableDiffusionPanoramaPipeline(**_A )
        _lowerCAmelCase : str = sd_pipe.to(_A )
        sd_pipe.set_progress_bar_config(disable=_A )
        _lowerCAmelCase : Optional[int] = self.get_dummy_inputs(_A )
        _lowerCAmelCase : Any = sd_pipe(**_A ).images
        _lowerCAmelCase : List[Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        _lowerCAmelCase : List[str] = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def a ( self ):
        '''Swap in PNDM scheduler and compare against a known slice.'''
        _lowerCAmelCase : List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator
        _lowerCAmelCase : Dict = self.get_dummy_components()
        _lowerCAmelCase : Any = PNDMScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , skip_prk_steps=_A )
        _lowerCAmelCase : Any = StableDiffusionPanoramaPipeline(**_A )
        _lowerCAmelCase : Any = sd_pipe.to(_A )
        sd_pipe.set_progress_bar_config(disable=_A )
        _lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs(_A )
        _lowerCAmelCase : Any = sd_pipe(**_A ).images
        _lowerCAmelCase : int = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        _lowerCAmelCase : Union[str, Any] = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
    """Slow GPU integration tests for ``StableDiffusionPanoramaPipeline``.

    NOTE(review): same obfuscation damage as the fast tests above — values
    bound to ``_lowerCAmelCase`` are read back under other names and calls
    pass the undefined ``_A``; as written these tests raise ``NameError``.
    """

    def a ( self ):
        '''Free GPU memory between tests.'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def a ( self , snake_case__=0 ):
        '''Deterministic default pipeline kwargs for the slow tests.'''
        _lowerCAmelCase : List[Any] = torch.manual_seed(_A )
        _lowerCAmelCase : Optional[Any] = {
            'prompt': 'a photo of the dolomites',
            'generator': generator,
            'num_inference_steps': 3,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }
        return inputs
    def a ( self ):
        '''End-to-end run with DDIM; compare a pixel slice of the panorama.'''
        _lowerCAmelCase : str = 'stabilityai/stable-diffusion-2-base'
        _lowerCAmelCase : Optional[int] = DDIMScheduler.from_pretrained(_A , subfolder='scheduler' )
        _lowerCAmelCase : Optional[int] = StableDiffusionPanoramaPipeline.from_pretrained(_A , scheduler=_A , safety_checker=_A )
        pipe.to(_A )
        pipe.set_progress_bar_config(disable=_A )
        pipe.enable_attention_slicing()
        _lowerCAmelCase : List[str] = self.get_inputs()
        _lowerCAmelCase : Optional[int] = pipe(**_A ).images
        _lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)
        _lowerCAmelCase : List[str] = np.array(
            [
                0.3696_8392,
                0.2702_5372,
                0.3244_6766,
                0.2837_9387,
                0.3636_3274,
                0.3073_3347,
                0.2710_0027,
                0.2705_4125,
                0.2553_6096,
            ] )
        assert np.abs(expected_slice - image_slice ).max() < 1E-2
    def a ( self ):
        '''End-to-end run with the LMS scheduler; compare a pixel slice.'''
        _lowerCAmelCase : Any = StableDiffusionPanoramaPipeline.from_pretrained(
            'stabilityai/stable-diffusion-2-base' , safety_checker=_A )
        _lowerCAmelCase : Tuple = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.to(_A )
        pipe.set_progress_bar_config(disable=_A )
        pipe.enable_attention_slicing()
        _lowerCAmelCase : Optional[Any] = self.get_inputs()
        _lowerCAmelCase : List[str] = pipe(**_A ).images
        _lowerCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)
        _lowerCAmelCase : int = np.array(
            [
                [
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                ]
            ] )
        assert np.abs(expected_slice - image_slice ).max() < 1E-3
    def a ( self ):
        '''Verify intermediate latents via a step callback at steps 1 and 2.'''
        _lowerCAmelCase : Any = 0
        def callback_fn(snake_case__ , snake_case__ , snake_case__ ) -> None:
            _lowerCAmelCase : List[str] = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                _lowerCAmelCase : Optional[int] = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                _lowerCAmelCase : Optional[int] = latents[0, -3:, -3:, -1]
                _lowerCAmelCase : int = np.array(
                    [
                        0.1868_1869,
                        0.3390_7816,
                        0.536_1276,
                        0.1443_2865,
                        -0.0285_6611,
                        -0.7394_1123,
                        0.2339_7987,
                        0.4732_2682,
                        -0.3782_3164,
                    ] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
            elif step == 2:
                _lowerCAmelCase : List[Any] = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                _lowerCAmelCase : Union[str, Any] = latents[0, -3:, -3:, -1]
                _lowerCAmelCase : Dict = np.array(
                    [
                        0.1853_9645,
                        0.3398_7248,
                        0.537_8559,
                        0.1443_7142,
                        -0.0245_5261,
                        -0.733_8317,
                        0.2399_0755,
                        0.4735_6272,
                        -0.378_6505,
                    ] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
        _lowerCAmelCase : Tuple = False
        _lowerCAmelCase : str = 'stabilityai/stable-diffusion-2-base'
        _lowerCAmelCase : List[Any] = DDIMScheduler.from_pretrained(_A , subfolder='scheduler' )
        _lowerCAmelCase : Tuple = StableDiffusionPanoramaPipeline.from_pretrained(_A , scheduler=_A , safety_checker=_A )
        _lowerCAmelCase : Any = pipe.to(_A )
        pipe.set_progress_bar_config(disable=_A )
        pipe.enable_attention_slicing()
        _lowerCAmelCase : str = self.get_inputs()
        pipe(**_A , callback=_A , callback_steps=1 )
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def a ( self ):
        '''Check peak GPU memory stays bounded under sequential CPU offload.'''
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        _lowerCAmelCase : List[Any] = 'stabilityai/stable-diffusion-2-base'
        _lowerCAmelCase : Dict = DDIMScheduler.from_pretrained(_A , subfolder='scheduler' )
        _lowerCAmelCase : int = StableDiffusionPanoramaPipeline.from_pretrained(_A , scheduler=_A , safety_checker=_A )
        _lowerCAmelCase : str = pipe.to(_A )
        pipe.set_progress_bar_config(disable=_A )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        _lowerCAmelCase : Dict = self.get_inputs()
        _lowerCAmelCase : Optional[Any] = pipe(**_A )
        _lowerCAmelCase : Optional[Any] = torch.cuda.max_memory_allocated()
        # make sure that less than 5.2 GB is allocated
        assert mem_bytes < 5.5 * 10**9
| 705 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """Unit tests for ``DDPMScheduler``.

    NOTE(review): the base class ``SCREAMING_SNAKE_CASE_`` is undefined in this
    module; presumably it should be ``SchedulerCommonTest`` (imported above).
    As elsewhere in this file, values bound to ``_lowerCAmelCase`` are read
    back under other names and loop variables are passed as ``snake_case__`` —
    ``NameError`` as written. TODO confirm the intended names.
    """

    # NOTE(review): presumably the ``scheduler_classes`` tuple.
    __magic_name__ = (DDPMScheduler,)
    def a ( self , **snake_case__ ):
        '''Build the default scheduler config dict, updated with any overrides.'''
        _lowerCAmelCase : Union[str, Any] = {
            'num_train_timesteps': 1000,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'variance_type': 'fixed_small',
            'clip_sample': True,
        }

        config.update(**snake_case__ )
        return config
    def a ( self ):
        '''Round-trip the config over several training-timestep counts.'''
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=snake_case__ )
    def a ( self ):
        '''Round-trip the config over paired beta_start/beta_end values.'''
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=snake_case__ , beta_end=snake_case__ )
    def a ( self ):
        '''Round-trip the config over the supported beta schedules.'''
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=snake_case__ )
    def a ( self ):
        '''Round-trip the config over the supported variance types.'''
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=snake_case__ )
    def a ( self ):
        '''Round-trip the config with clip_sample on and off.'''
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=snake_case__ )
    def a ( self ):
        '''Round-trip the config over thresholding/prediction combinations.'''
        self.check_over_configs(thresholding=snake_case__ )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=snake_case__ , prediction_type=snake_case__ , sample_max_value=snake_case__ , )
    def a ( self ):
        '''Round-trip the config over the supported prediction types.'''
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=snake_case__ )
    def a ( self ):
        '''Round-trip a forward pass at several timesteps.'''
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=snake_case__ )
    def a ( self ):
        '''Check the closed-form variance at three representative timesteps.'''
        _lowerCAmelCase : List[Any] = self.scheduler_classes[0]
        _lowerCAmelCase : Optional[Any] = self.get_scheduler_config()
        _lowerCAmelCase : List[Any] = scheduler_class(**snake_case__ )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
    def a ( self ):
        '''Full reverse-diffusion loop; checks output statistics.'''
        _lowerCAmelCase : Optional[Any] = self.scheduler_classes[0]
        _lowerCAmelCase : Optional[Any] = self.get_scheduler_config()
        _lowerCAmelCase : List[Any] = scheduler_class(**snake_case__ )
        _lowerCAmelCase : Optional[int] = len(snake_case__ )
        _lowerCAmelCase : str = self.dummy_model()
        _lowerCAmelCase : str = self.dummy_sample_deter
        _lowerCAmelCase : Union[str, Any] = torch.manual_seed(0 )
        for t in reversed(range(snake_case__ ) ):
            # 1. predict noise residual
            _lowerCAmelCase : List[Any] = model(snake_case__ , snake_case__ )
            # 2. predict previous mean of sample x_t-1
            _lowerCAmelCase : Any = scheduler.step(snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ ).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            _lowerCAmelCase : Dict = pred_prev_sample
        _lowerCAmelCase : Dict = torch.sum(torch.abs(snake_case__ ) )
        _lowerCAmelCase : List[str] = torch.mean(torch.abs(snake_case__ ) )
        assert abs(result_sum.item() - 258.9606 ) < 1E-2
        assert abs(result_mean.item() - 0.3372 ) < 1E-3
    def a ( self ):
        '''Full reverse-diffusion loop with v-prediction; checks statistics.'''
        _lowerCAmelCase : int = self.scheduler_classes[0]
        _lowerCAmelCase : Any = self.get_scheduler_config(prediction_type='v_prediction' )
        _lowerCAmelCase : Union[str, Any] = scheduler_class(**snake_case__ )
        _lowerCAmelCase : Optional[int] = len(snake_case__ )
        _lowerCAmelCase : Any = self.dummy_model()
        _lowerCAmelCase : Tuple = self.dummy_sample_deter
        _lowerCAmelCase : Optional[int] = torch.manual_seed(0 )
        for t in reversed(range(snake_case__ ) ):
            # 1. predict noise residual
            _lowerCAmelCase : Union[str, Any] = model(snake_case__ , snake_case__ )
            # 2. predict previous mean of sample x_t-1
            _lowerCAmelCase : Dict = scheduler.step(snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ ).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            _lowerCAmelCase : Tuple = pred_prev_sample
        _lowerCAmelCase : Any = torch.sum(torch.abs(snake_case__ ) )
        _lowerCAmelCase : Dict = torch.mean(torch.abs(snake_case__ ) )
        assert abs(result_sum.item() - 202.0296 ) < 1E-2
        assert abs(result_mean.item() - 0.2631 ) < 1E-3
    def a ( self ):
        '''Custom timestep list: previous_timestep must walk the list.'''
        _lowerCAmelCase : List[Any] = self.scheduler_classes[0]
        _lowerCAmelCase : Optional[int] = self.get_scheduler_config()
        _lowerCAmelCase : Union[str, Any] = scheduler_class(**snake_case__ )
        _lowerCAmelCase : Union[str, Any] = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=snake_case__ )
        _lowerCAmelCase : Union[str, Any] = scheduler.timesteps
        for i, timestep in enumerate(snake_case__ ):
            if i == len(snake_case__ ) - 1:
                _lowerCAmelCase : str = -1
            else:
                _lowerCAmelCase : Optional[Any] = timesteps[i + 1]
            _lowerCAmelCase : int = scheduler.previous_timestep(snake_case__ )
            _lowerCAmelCase : int = prev_t.item()
            self.assertEqual(snake_case__ , snake_case__ )
    def a ( self ):
        '''Non-descending custom timesteps must be rejected.'''
        _lowerCAmelCase : Optional[int] = self.scheduler_classes[0]
        _lowerCAmelCase : Tuple = self.get_scheduler_config()
        _lowerCAmelCase : List[Any] = scheduler_class(**snake_case__ )
        _lowerCAmelCase : Optional[int] = [100, 87, 50, 51, 0]
        with self.assertRaises(snake_case__ , msg='`custom_timesteps` must be in descending order.' ):
            scheduler.set_timesteps(timesteps=snake_case__ )
    def a ( self ):
        '''Passing both num_inference_steps and custom timesteps must fail.'''
        _lowerCAmelCase : Optional[int] = self.scheduler_classes[0]
        _lowerCAmelCase : List[str] = self.get_scheduler_config()
        _lowerCAmelCase : Union[str, Any] = scheduler_class(**snake_case__ )
        _lowerCAmelCase : Optional[int] = [100, 87, 50, 1, 0]
        _lowerCAmelCase : int = len(snake_case__ )
        with self.assertRaises(snake_case__ , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
            scheduler.set_timesteps(num_inference_steps=snake_case__ , timesteps=snake_case__ )
    def a ( self ):
        '''Timesteps at/above num_train_timesteps must be rejected.'''
        _lowerCAmelCase : Optional[int] = self.scheduler_classes[0]
        _lowerCAmelCase : int = self.get_scheduler_config()
        _lowerCAmelCase : Any = scheduler_class(**snake_case__ )
        _lowerCAmelCase : Any = [scheduler.config.num_train_timesteps]
        # NOTE(review): the msg below is missing its f-prefix and carries a
        # stray '}}', so the placeholder is never interpolated — likely meant
        # to be an f-string.
        with self.assertRaises(
            snake_case__ , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ):
            scheduler.set_timesteps(timesteps=snake_case__ )
| 630 | 0 |
'''simple docstring'''
from typing import List
from .keymap import KEYMAP, get_character
def lowercase (_A ):
    """Decorator factory: mark a function as the handler for one key.

    The key ``_A`` is appended to the function's ``handle_key`` list attribute
    (created on first use), which the ``KeyHandler`` metaclass below collects.

    Fixes applied (review): the inner decorator previously read the undefined
    names ``lowerCAmelCase__`` and ``key``; it now uses its own argument and
    the enclosing key.
    """
    def decorator(func ):
        # Accumulate keys across stacked decorators.
        handle = getattr(func , 'handle_key' , [] )
        handle += [_A]
        setattr(func , 'handle_key' , handle )
        return func
    return decorator
def lowercase (*_A ):
    """Decorator factory: mark a function as the handler for several keys.

    All keys in ``*_A`` are appended to the function's ``handle_key`` list
    attribute (created on first use), which the ``KeyHandler`` metaclass
    below collects.

    Fixes applied (review): the inner decorator previously read the undefined
    names ``lowerCAmelCase__`` and ``keys``; it now uses its own argument and
    the enclosing key tuple.
    """
    def decorator(func ):
        # Accumulate keys across stacked decorators.
        handle = getattr(func , 'handle_key' , [] )
        handle += list(_A )
        setattr(func , 'handle_key' , handle )
        return func
    return decorator
class UpperCamelCase__ ( __a ):
    """Metaclass that wires keyboard handling into a class.

    Collects every attribute marked by the decorators above (via their
    ``handle_key`` lists) into a ``key_handler`` dict on the new class, and
    attaches a generic ``handle_input`` dispatcher.

    Fixes applied (review): ``__new__`` previously declared three parameters
    all named ``snake_case__`` (duplicate argument names are a SyntaxError)
    and its body referenced the undefined names ``attrs``, ``handled_keys``
    and ``KeyHandler``; the names are now consistent. NOTE: the base ``__a``
    is undefined in this view — for a metaclass it should be ``type`` — TODO
    confirm.
    """

    def __new__( cls , name , bases , attrs ):
        '''Create the class, then register all key-marked attributes on it.'''
        new_cls = super().__new__(cls , name , bases , attrs )
        if not hasattr(new_cls , 'key_handler' ):
            setattr(new_cls , 'key_handler' , {} )
        setattr(new_cls , 'handle_input' , UpperCamelCase__.a )
        for value in attrs.values():
            handled_keys = getattr(value , 'handle_key' , [] )
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def a ( cls ):
        '''Read one key press and dispatch to the registered handler, if any.'''
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char )
        handler = cls.key_handler.get(char )
        if handler:
            # presumably records the pressed key for later inspection —
            # TODO confirm the intended attribute name.
            cls.current_selection = char
            return handler(cls )
        else:
            return None
def lowercase (cls ):
    """Rebuild *cls* through the key-handling metaclass defined above.

    Fix (review): the original referenced the undefined name ``KeyHandler``;
    the metaclass in this module is named ``UpperCamelCase__``.
    """
    return UpperCamelCase__(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
| 706 |
'''simple docstring'''
import socket
def lowercase ():
    """Simple TCP client: connect to a server on this host, port 12312, send a
    greeting, and stream the server's response into ``Received_file``.

    Fixes applied (review): the received chunk was bound to an obfuscated
    variable while the loop tested ``data`` and wrote ``_A`` (both undefined,
    so the first chunk raised ``NameError``); all three now use ``data``.
    """
    sock = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
    host = socket.gethostname()
    port = 1_2_3_1_2
    sock.connect((host, port) )
    sock.send(B'Hello server!' )
    # The `with` block guarantees the output file is closed even on error.
    with open('Received_file' , 'wb' ) as out_file:
        print('File opened' )
        print('Receiving data...' )
        while True:
            data = sock.recv(1_0_2_4 )
            if not data:
                # Empty recv means the server closed its end of the stream.
                break
            out_file.write(data )
    print('Successfully received the file' )
    sock.close()
    print('Connection closed' )
if __name__ == "__main__":
    # Fix (review): this module defines no ``main``; the client entry point
    # above is named ``lowercase``.
    lowercase()
| 630 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger (variable name obfuscated).
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
# Checkpoint-name -> config-URL map (name obfuscated; presumably the CANINE
# pretrained-config archive map — confirm upstream; nothing below reads it).
lowerCAmelCase : Any = {
    """google/canine-s""": """https://huggingface.co/google/canine-s/resolve/main/config.json""",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}
class UpperCamelCase__(__lowerCamelCase):
    """Configuration for a CANINE-style character-level encoder.

    Defaults mirror the ``google/canine-s`` checkpoint. NOTE(review): the base
    class is the obfuscated name ``__lowerCamelCase`` — presumably
    ``PretrainedConfig``; confirm upstream.
    """

    # fix: was bound to the obfuscated attribute name `__magic_name__`;
    # config classes register themselves under `model_type`.
    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        # fix: the original declared every parameter as `snake_case__`
        # (duplicate argument names — a SyntaxError) and forwarded the
        # undefined name `a_` to the base constructor. Parameter names and
        # defaults restored from the upstream CanineConfig.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
| 707 |
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
# Flags selecting which conversion steps to run (names restored from the
# upstream diffusers conversion script).
do_only_config = False
do_only_weights = True
do_only_renaming = False


if __name__ == "__main__":
    # fix: every binding below originally went to the single obfuscated name
    # `lowerCAmelCase`, leaving `parser`, `args`, `config`, `model`,
    # `state_dict`, `new_state_dict`, and `has_changed` undefined at their
    # use sites; meaningful names restored throughout.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--repo_path",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()

    # Old config key -> new config key.
    config_parameters_to_change = {
        "image_size": "sample_size",
        "num_res_blocks": "layers_per_block",
        "block_channels": "block_out_channels",
        "down_blocks": "down_block_types",
        "up_blocks": "up_block_types",
        "downscale_freq_shift": "freq_shift",
        "resnet_num_groups": "norm_num_groups",
        "resnet_act_fn": "act_fn",
        "resnet_eps": "norm_eps",
        "num_head_channels": "attention_head_dim",
    }
    # Old state-dict key prefix -> new module name.
    key_parameters_to_change = {
        "time_steps": "time_proj",
        "mid": "mid_block",
        "downsample_blocks": "down_blocks",
        "upsample_blocks": "up_blocks",
    }

    # Checkpoints with a top-level config.json keep the UNet at the repo root;
    # otherwise it lives in the "unet" subfolder.
    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNetaDModel(**config)
    else:
        class_name = UNetaDConditionModel if "ldm-text2im-large-256" in args.repo_path else UNetaDModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]
        config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
        config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            # Downsampler conv aliases are not used by the new model.
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 630 | 0 |
'''simple docstring'''
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCamelCase__(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for MobileBERT's slow and fast (Rust) tokenizers.

    fix: the class attributes were all bound to the single obfuscated name
    `__magic_name__` (so the mixin never saw them), every test method was
    named `a` (so later defs shadowed earlier ones and nothing ran), and the
    base class was the undefined `SCREAMING_SNAKE_CASE_`. Names restored from
    the upstream MobileBERT tokenization test.
    """

    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"

    def setUp(self):
        """Write a small WordPiece vocab to disk and point the tokenizer list at it."""
        super().setUp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]

    def get_input_output_texts(self, tokenizer):
        """Return a (raw, expected-decoded) text pair for round-trip tests."""
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        # fix: the comprehensions tokenized the undefined name `snake_case__`
        # instead of the loop variable `t`.
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])

    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
| 708 |
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class UpperCamelCase__(tf.keras.layers.Layer):
    """In-graph GPT-2 tokenizer backed by keras-nlp's BytePairTokenizer.

    fix: the original declared duplicate `snake_case__` parameters (a
    SyntaxError), bound the instance state to throwaway locals instead of
    `self.*`, and named every classmethod `a` even though `cls.from_tokenizer`
    is called below; the Keras contract names (`get_config`, `from_config`,
    `call`) and attribute assignments are restored.
    """

    def __init__(self, vocab, merges, max_length=None, pad_token_id=None):
        """Store the BPE vocab/merges and build the in-graph tokenizer.

        Args:
            vocab: token -> id mapping.
            merges: BPE merge rules, one "a b" string per merge.
            max_length: optional fixed sequence length for the tokenizer.
            pad_token_id: id used to pad up to `max_length` in `call`.
        """
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer, *args, **kwargs):
        """Build from an existing slow GPT-2 tokenizer instance."""
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *init_inputs, **kwargs):
        """Build by first loading the slow tokenizer from the hub or disk."""
        tokenizer = GPTaTokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        """Keras deserialization hook: rebuild from `get_config` output."""
        return cls(**config)

    def get_config(self):
        """Keras serialization hook: everything needed to re-create the layer."""
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length=None):
        """Tokenize a batch of strings, optionally padding to a fixed length."""
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 630 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
# Emit INFO-level logs during conversion.
logging.set_verbosity_info()
# Module-level logger (variable name obfuscated; not referenced in this chunk).
lowerCAmelCase : Tuple = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config(model_name):
    """Build an ``ASTConfig`` matching *model_name*, including label maps.

    fix: every binding originally went to a throwaway obfuscated local, so
    `config.*` attributes were never set and `idalabel` was undefined at its
    use site; the function name is restored to match the call in the
    checkpoint-conversion entry point below.
    """
    config = ASTConfig()

    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name):
    """Map an original AST checkpoint key to its HF transformers equivalent.

    fix: every replacement result was bound to a throwaway local instead of
    rebinding `name`, so the function had no effect; the name is restored so
    `convert_state_dict` below can call it.
    """
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name


def convert_state_dict(orig_state_dict, config):
    """Rewrite *orig_state_dict* in place to HF naming, splitting each fused
    qkv projection into separate query/key/value tensors.

    fix: the sliced qkv tensors and renamed entries were bound to throwaway
    locals instead of being written back into the state dict, silently
    dropping every parameter.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])  # e.g. "module.v.blocks.<num>.attn.qkv.*"
            dim = config.hidden_size
            prefix = f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def remove_keys(state_dict):
    """Drop original classifier-head keys that the HF model does not use.

    fix: the pop used the undefined name `_A` as both key and default; the
    function name is restored to match the call in the conversion entry point.
    """
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Convert an original AST checkpoint to HF format, verify its logits on a
    sample input, and optionally save/push the result.

    fix: the original declared three parameters all named `_A` (a SyntaxError)
    and bound every local to a throwaway obfuscated name; names restored from
    the upstream conversion script so the `__main__` call below works.
    """
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
        "ast-finetuned-audioset-10-10-0.4593": (
            "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.450": (
            "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448": (
            "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448-v2": (
            "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
        ),
        "ast-finetuned-audioset-12-12-0.447": (
            "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
        ),
        "ast-finetuned-audioset-14-14-0.443": (
            "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
        ),
        "ast-finetuned-audioset-16-16-0.442": (
            "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
        ),
        "ast-finetuned-speech-commands-v2": (
            "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
        ),
    }

    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()
    model.load_state_dict(new_state_dict)

    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset",
        )
        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits

    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError("Unknown model name")
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f"MIT/{model_name}")
        feature_extractor.push_to_hub(f"MIT/{model_name}")


if __name__ == "__main__":
    # fix: `parser` and `args` were bound to the obfuscated name
    # `lowerCAmelCase`, so `parser.add_argument` and `args.*` were undefined.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="ast-finetuned-audioset-10-10-0.4593",
        type=str,
        help="Name of the Audio Spectrogram Transformer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 709 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure for the M-CTC-T model package.
_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}

# Only expose the modeling objects when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # fix: this list was bound to a throwaway obfuscated name instead of
    # being registered under "modeling_mctct", so the lazy module never
    # exported the torch classes.
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel

else:
    import sys

    # fix: the lazy module must be installed into sys.modules, not bound to
    # an unused obfuscated name.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 630 | 0 |
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
# Batch-size constants for the MRPC example. fix: both were bound to the same
# obfuscated name `lowerCAmelCase`; names restored from the upstream
# Accelerate DeepSpeed example — confirm against that file.
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased"):
    """Build train/eval DataLoaders for GLUE MRPC.

    Args:
        accelerator: the Accelerator; used to decide TPU-style fixed-length padding.
        batch_size: per-device batch size.
        model_name: tokenizer checkpoint to load.

    Returns:
        (train_dataloader, eval_dataloader)

    fix: the original declared three parameters all named `_A` (a SyntaxError)
    and bound `datasets`/`tokenized_datasets` to throwaway obfuscated names,
    leaving them undefined at their use sites; the function name is restored
    to match the call in the training function below.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        # NOTE(review): the upstream example evaluates with EVAL_BATCH_SIZE;
        # `batch_size` is used here to keep this edit self-contained — confirm.
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    """
    Fine-tune a sequence-classification model on MRPC under `accelerate`,
    supporting both a plain optimizer/scheduler and DeepSpeed-provided ones.

    Args:
        config (dict): hyper-parameters (`lr`, `num_epochs`, `seed`, `batch_size`).
        args: parsed CLI namespace (`model_name_or_path`, `output_dir`,
            `performance_lower_bound`).
    """
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name_or_path = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)

    # Instantiate optimizer: a real AdamW unless the DeepSpeed config supplies its own,
    # in which case a DummyOptim placeholder is handed to `accelerator.prepare`.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler (a dummy one when DeepSpeed manages the schedule itself).
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}

    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]
        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    # Optionally enforce a minimum accuracy (used by CI performance tests).
    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)
def main():
    """Parse command-line arguments and launch `training_function`."""
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    # Fixed hyper-parameters, except for the epoch count which is CLI-controlled.
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 710 |
'''simple docstring'''
lowerCAmelCase : Optional[int] = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)]
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : str = 0
while number:
# Increased Speed Slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 1_0_0_0_0_0]
number //= 1_0_0_0_0_0
return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
lowerCAmelCase : list[bool | None] = [None] * 10_00_00_00
lowerCAmelCase : List[str] = True
lowerCAmelCase : Union[str, Any] = False
def lowercase (_A ):
"""simple docstring"""
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
_lowerCAmelCase : Any = chain(next_number(_A ) )
_lowerCAmelCase : List[str] = number_chain
while number < 1_0_0_0_0_0_0_0:
_lowerCAmelCase : Tuple = number_chain
number *= 1_0
return number_chain
def lowercase (_A = 1_0_0_0_0_0_0_0 ):
"""simple docstring"""
for i in range(1 , _A ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(_A )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{solution() = }''')
| 630 | 0 |
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Module-level logger for this example script; verbosity is configured later in main().
lowerCAmelCase : Dict = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.14.0""", """To fix: pip install -r examples/pytorch/audio-classification/requirements.txt""")
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000):
    """Randomly sample chunks of `max_length` seconds from the input audio.

    Args:
        wav: the raw waveform samples.
        max_length: target clip length in seconds.
        sample_rate: waveform sampling rate in Hz.

    Returns:
        `wav` unchanged when it is already short enough, otherwise a random
        contiguous slice of ``round(sample_rate * max_length)`` samples.
    """
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    Using `HfArgumentParser` we can turn this class into argparse arguments to be able to
    specify them on the command line.
    """

    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/feature-extractor we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        # Backward compatibility for the deprecated `--freeze_feature_extractor` flag:
        # warn when it is unset/False while the encoder is frozen, and reject the
        # contradictory combination outright.
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder`"
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`."
                "Only make use of `--freeze_feature_encoder`."
            )
def main():
    """Entry point: parse arguments, load data/model, then train and evaluate an
    audio-classification model with the HF `Trainer`."""
    # See all possible arguments by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to train from scratch."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.train_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.eval_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    if data_args.audio_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--audio_column_name` to the correct audio column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    if data_args.label_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--label_column_name` to the correct text column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
    # transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path,
        return_attention_mask=model_args.attention_mask,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )

    model_input_name = feature_extractor.model_input_names[0]

    def train_transforms(batch):
        """Apply train_transforms (random cropping) across a batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    def val_transforms(batch):
        """Apply val_transforms (no cropping) across a batch."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="audio-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # freeze the convolutional waveform encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()
| 711 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester):
    """Config tester override: MobileViTV2 configs have no `hidden_size`, so the
    common-properties check asserts on `width_multiplier` instead."""

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))
class MobileViTVaModelTester:
    """Builds tiny MobileViTV2 configs and dummy inputs for the common model tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        # Width of the final stage follows the model's own channel-rounding rule.
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout_prob = ffn_dropout
        self.attn_dropout_prob = attn_dropout

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTVaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout_prob,
            attn_dropout=self.attn_dropout_prob,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as MobileViTV2
    does not use input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTVaModel,
            "image-classification": MobileViTVaForImageClassification,
            "image-segmentation": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    """Load the COCO cats fixture image used by the integration tests."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def a ( self ):
'''simple docstring'''
return (
MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' )
if is_vision_available()
else None
)
@slow
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ).to(
snake_case__ )
_lowerCAmelCase : str = self.default_image_processor
_lowerCAmelCase : Any = prepare_img()
_lowerCAmelCase : Optional[int] = image_processor(images=snake_case__ , return_tensors='pt' ).to(snake_case__ )
# forward pass
with torch.no_grad():
_lowerCAmelCase : Tuple = model(**snake_case__ )
# verify the logits
_lowerCAmelCase : Optional[Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , snake_case__ )
_lowerCAmelCase : Tuple = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] ).to(snake_case__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case__ , atol=1E-4 ) )
@slow
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
_lowerCAmelCase : Any = model.to(snake_case__ )
_lowerCAmelCase : int = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
_lowerCAmelCase : Optional[int] = prepare_img()
_lowerCAmelCase : Dict = image_processor(images=snake_case__ , return_tensors='pt' ).to(snake_case__ )
# forward pass
with torch.no_grad():
_lowerCAmelCase : int = model(**snake_case__ )
_lowerCAmelCase : Dict = outputs.logits
# verify the logits
_lowerCAmelCase : str = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , snake_case__ )
_lowerCAmelCase : Any = torch.tensor(
[
[[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
[[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
[[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
] , device=snake_case__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , snake_case__ , atol=1E-4 ) )
@slow
def test_post_processing_semantic_segmentation(self):
    """Check `post_process_semantic_segmentation` output shapes with and without target sizes."""
    model = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
    model = model.to(torch_device)
    image_processor = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
    image = prepare_img()
    inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
    # forward pass
    with torch.no_grad():
        outputs = model(**inputs)
    # post-processing runs on CPU tensors
    outputs.logits = outputs.logits.detach().cpu()
    # with an explicit target size, the map is resized to it
    segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
    expected_shape = torch.Size((50, 60))
    self.assertEqual(segmentation[0].shape, expected_shape)
    # without target sizes, the native 32x32 resolution is kept
    segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
    expected_shape = torch.Size((32, 32))
    self.assertEqual(segmentation[0].shape, expected_shape)
| 630 | 0 |
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
# Modalities a tool may declare; referenced as `authorized_types` by the mixin below.
authorized_types = ["text", "image", "audio"]
def create_inputs(input_types):
    """Build one dummy input per requested modality.

    `input_types` is a list whose items are "text", "image", "audio", or a
    nested list of those (handled recursively). Raises ValueError otherwise.
    """
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append('Text input')
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir('fixtures/tests_samples/COCO')) / '000000039769.png').resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            # A nested list of modalities yields a nested list of inputs.
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f'Invalid type requested: {input_type}')
    return inputs
def output_types(outputs):
    """Map each output object to its modality name ("text"/"image"/"audio").

    Raises ValueError for any output that is none of the supported types.
    """
    types = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            types.append('text')
        elif isinstance(output, (Image.Image, AgentImage)):
            types.append('image')
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            types.append('audio')
        else:
            raise ValueError(f'Invalid output: {output}')
    return types
@is_tool_test
class UpperCamelCase__:
    """Sanity-check mixin for a tool exposed as `self.tool` by the concrete test class."""

    def test_inputs_outputs(self):
        # Declared inputs/outputs must exist and be drawn from the authorized modalities.
        self.assertTrue(hasattr(self.tool, 'inputs'))
        self.assertTrue(hasattr(self.tool, 'outputs'))
        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)
        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        # Calling the tool on dummy inputs must produce outputs of the declared modalities.
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)
        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]
        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, 'description'))
        self.assertTrue(hasattr(self.tool, 'default_checkpoint'))
        self.assertTrue(self.tool.description.startswith('This is a tool that'))

    def test_agent_types_outputs(self):
        # Outputs must be wrapped in the agent type matching each declared modality.
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_type_inputs(self):
        # The tool must also accept inputs already wrapped in agent types.
        inputs = create_inputs(self.tool.inputs)
        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))
        # Should not raise an error
        outputs = self.tool(*_inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
| 712 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class UpperCamelCase__(unittest.TestCase):
    """Integration test for the Flax XLM-RoBERTa base checkpoint."""

    @slow
    def a(self):
        """Compare the last hidden state of a known sentence against reference values."""
        model = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base')
        tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-base')
        text = 'The dog is cute and lives in the garden house'
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        output = model(input_ids)['last_hidden_state']
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1E-3))
| 630 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def lowercase (_A="" ):
"""simple docstring"""
_lowerCAmelCase : List[Any] = tempfile.mkdtemp()
return os.path.join(lowerCamelCase_ , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class UpperCamelCase__(unittest.TestCase):
    """Tests for AgentAudio: tensor round-trips and on-disk serialization."""

    def test_from_tensor(self):
        tensor = torch.rand(12, dtype=torch.float32) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1E-4))
        del agent_type
        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))
        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1E-4))

    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float32) - 0.5
        path = get_new_path(suffix='.wav')
        sf.write(path, tensor, 16000)
        agent_type = AgentAudio(path)
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1E-4))
        self.assertEqual(agent_type.to_string(), path)
@require_vision
@require_torch
class UpperCamelCase__(unittest.TestCase):
    """Tests for AgentImage built from a tensor, a file path, and a PIL image."""

    def test_from_tensor(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1E-4))
        self.assertIsInstance(agent_type.to_raw(), Image.Image)
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_string(self):
        path = Path(get_tests_dir('fixtures/tests_samples/COCO')) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(path)
        # Built from a path: to_string should point at the very same file.
        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_image(self):
        path = Path(get_tests_dir('fixtures/tests_samples/COCO')) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(image)
        # Built from a PIL image: serialization writes a new file, not the original.
        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
class UpperCamelCase__(unittest.TestCase):
    """Tests for AgentText: to_string/to_raw views and plain-string equality."""

    def test_from_string(self):
        string = "Hey!"
        agent_type = AgentText(string)
        self.assertEqual(string, agent_type.to_string())
        self.assertEqual(string, agent_type.to_raw())
        # AgentText compares equal to the underlying str.
        self.assertEqual(string, agent_type)
| 713 |
'''simple docstring'''
def pancake_sort(arr):
    """Sort a list of comparable items with pancake sort and return the result.

    >>> pancake_sort([3, 1, 2])
    [1, 2, 3]
    """
    cur = len(arr)
    while cur > 1:
        # Find the maximum number in the unsorted prefix arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi, flipping that maximum to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the whole unsorted prefix so the maximum lands at index cur-1
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
if __name__ == "__main__":
    # Read a comma-separated list of integers from stdin and print it sorted.
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
| 630 | 0 |
def is_isogram(string: str) -> bool:
    """Return True if no letter occurs more than once (case-insensitive).

    Raises:
        ValueError: if `string` contains any non-alphabetic character.
    """
    if not all(x.isalpha() for x in string):
        raise ValueError('String must only contain alphabetic characters.')
    # Lower-casing makes the check case-insensitive; a set drops duplicates.
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))
if __name__ == "__main__":
    # Read a word from stdin and report whether it is an isogram.
    input_str = input("Enter a string ").strip()
    isogram = is_isogram(input_str)
    print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
| 714 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase : str = {
"""EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class UpperCamelCase__(SCREAMING_SNAKE_CASE_):
    """GPT-J model configuration; defaults match the EleutherAI/gpt-j-6B checkpoint."""

    model_type = "gptj"
    # Map generic config attribute names onto GPT-J's historical field names.
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=5_0400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,  # inner FFN dim; None means 4 * n_embd downstream
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1E-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=5_0256,
        eos_token_id=5_0256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class UpperCamelCase__(SCREAMING_SNAKE_CASE_):
    """ONNX export configuration for GPT-J, with optional past-key-values support."""

    def __init__(self, config, task="default", patching_specs=None, use_past=False):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, 'pad_token_id', None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch

                batch, seqlen = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['past_key_values'] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs['attention_mask'] = common_inputs['attention_mask']
        if self.use_past:
            # Extend the attention mask to also cover the dummy past positions.
            mask_dtype = ordered_inputs['attention_mask'].dtype
            ordered_inputs['attention_mask'] = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 630 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class UpperCamelCase__(unittest.TestCase):
    """Tests for BlipaProcessor: save/load round-trips and delegation to tokenizer/image processor."""

    def setUp(self):
        # Build a processor from tiny components and save it to a temp dir for reloading.
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPTaTokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model')
        processor = BlipaProcessor(image_processor, tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipaProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = BlipaProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        # The processor must produce the same pixel features as the bare image processor.
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)

    def test_tokenizer(self):
        # The processor must produce the same encodings as the bare tokenizer.
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ['pixel_values', 'input_ids', 'attention_mask'])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ['pixel_values', 'input_ids', 'attention_mask'])
| 715 |
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Base import structure: configs and the processor are always importable.
_import_structure = {
    "configuration_x_clip": [
        "XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XCLIPConfig",
        "XCLIPTextConfig",
        "XCLIPVisionConfig",
    ],
    "processing_x_clip": ["XCLIPProcessor"],
}

# Modeling code requires torch; register it only when torch is available.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_x_clip"] = [
        "XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XCLIPModel",
        "XCLIPPreTrainedModel",
        "XCLIPTextModel",
        "XCLIPVisionModel",
    ]

if TYPE_CHECKING:
    # Direct imports for static type checkers only.
    from .configuration_x_clip import (
        XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XCLIPConfig,
        XCLIPTextConfig,
        XCLIPVisionConfig,
    )
    from .processing_x_clip import XCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_x_clip import (
            XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            XCLIPModel,
            XCLIPPreTrainedModel,
            XCLIPTextModel,
            XCLIPVisionModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 630 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCamelCase__(unittest.TestCase):
    """Nightly integration test for the legacy ONNX Stable Diffusion inpaint pipeline."""

    @property
    def gpu_provider(self):
        # onnxruntime execution provider: CUDA with a capped memory arena.
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def a(self):
        """Inpaint a reference image and compare pixels against a stored reference output."""
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo.png'
        )
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo_mask.png'
        )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy'
        )
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            'CompVis/stable-diffusion-v1-4',
            revision='onnx',
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = 'A red cat sitting on a park bench'
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type='np',
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1E-2
| 716 |
'''simple docstring'''
import math
from datetime import datetime, timedelta
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : Optional[Any] = year % 1_9
_lowerCAmelCase : Any = year % 4
_lowerCAmelCase : Optional[int] = year % 7
_lowerCAmelCase : int = math.floor(year / 1_0_0 )
_lowerCAmelCase : Dict = math.floor((1_3 + 8 * leap_day_inhibits) / 2_5 )
_lowerCAmelCase : Optional[Any] = leap_day_inhibits / 4
_lowerCAmelCase : Dict = (
1_5 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 3_0
_lowerCAmelCase : List[Any] = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
_lowerCAmelCase : Dict = (1_9 * metonic_cycle + secular_moon_shift) % 3_0
# PHM -> Paschal Full Moon
_lowerCAmelCase : Union[str, Any] = (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
if days_to_add == 2_9 and days_from_phm_to_sunday == 6:
return datetime(_A , 4 , 1_9 )
elif days_to_add == 2_8 and days_from_phm_to_sunday == 6:
return datetime(_A , 4 , 1_8 )
else:
return datetime(_A , 3 , 2_2 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
    # Print Easter dates for sample years; `tense` picks past/future wording.
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
| 630 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

# Base import structure: configuration is importable without any backend.
_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}

# PyTorch models.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_resnet"] = [
        "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ResNetForImageClassification",
        "ResNetModel",
        "ResNetPreTrainedModel",
        "ResNetBackbone",
    ]

# TensorFlow models.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_resnet"] = [
        "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFResNetForImageClassification",
        "TFResNetModel",
        "TFResNetPreTrainedModel",
    ]

# Flax models.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_resnet"] = [
        "FlaxResNetForImageClassification",
        "FlaxResNetModel",
        "FlaxResNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Direct imports for static type checkers only.
    from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_resnet import (
            RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            ResNetBackbone,
            ResNetForImageClassification,
            ResNetModel,
            ResNetPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_resnet import (
            TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFResNetForImageClassification,
            TFResNetModel,
            TFResNetPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 717 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class UpperCamelCase__(unittest.TestCase):
    """Unit tests for greedy_knapsack.calc_profit."""

    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        # NOTE(review): assertRaisesRegex without a callable only creates a context
        # manager; as written these checks never actually invoke calc_profit.
        self.assertRaisesRegex(ValueError, 'max_weight must greater than zero.')

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, 'Weight can not be negative.')

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, 'Profit can not be negative.')

    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError, 'max_weight must greater than zero.')

    def test_unequal_list_length(self):
        self.assertRaisesRegex(
            IndexError, 'The length of profit and weight must be same.'
        )
if __name__ == "__main__":
    # Run this module's unittest suite when executed as a script.
    unittest.main()
| 630 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# Base import structure: config and tokenizer need no optional backend.
_import_structure = {
    "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
    "tokenization_canine": ["CanineTokenizer"],
}

# Modeling code requires torch.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_canine"] = [
        "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CanineForMultipleChoice",
        "CanineForQuestionAnswering",
        "CanineForSequenceClassification",
        "CanineForTokenClassification",
        "CanineLayer",
        "CanineModel",
        "CaninePreTrainedModel",
        "load_tf_weights_in_canine",
    ]

if TYPE_CHECKING:
    # Direct imports for static type checkers only.
    from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
    from .tokenization_canine import CanineTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_canine import (
            CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
            CanineForMultipleChoice,
            CanineForQuestionAnswering,
            CanineForSequenceClassification,
            CanineForTokenClassification,
            CanineLayer,
            CanineModel,
            CaninePreTrainedModel,
            load_tf_weights_in_canine,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 718 |
'''simple docstring'''
def method_1(boundary, steps):
    """Approximate the integral of f over [boundary[0], boundary[1]] with the
    extended trapezoidal rule using `steps` subintervals."""
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    """Yield interior grid points strictly between a and b with spacing h."""
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    """Integrand: f(x) = x**2."""
    y = (x - 0) * (x - 0)
    return y


def main():
    """Integrate f over [0, 1] with 10 steps and print the result."""
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f'y = {y}')


if __name__ == "__main__":
    main()
| 630 | 0 |
'''simple docstring'''
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__magic_name__ = (DPMSolverSDEScheduler,)
__magic_name__ = 1_0
def get_scheduler_config(self, **kwargs):
    """Return the default scheduler config, with any keyword overrides applied."""
    config = {
        "num_train_timesteps": 1100,
        "beta_start": 0.0001,
        "beta_end": 0.02,
        "beta_schedule": "linear",
        "noise_sampler_seed": 0,
    }
    config.update(**kwargs)
    return config
def test_timesteps(self):
    """Scheduler must accept a range of num_train_timesteps values."""
    for timesteps in [10, 50, 100, 1000]:
        self.check_over_configs(num_train_timesteps=timesteps)
def test_betas(self):
    """Scheduler must accept several (beta_start, beta_end) pairs."""
    for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
        self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
def a ( self ):
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=snake_case__ )
def a ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.scheduler_classes[0]
_lowerCAmelCase : List[Any] = self.get_scheduler_config()
_lowerCAmelCase : Tuple = scheduler_class(**snake_case__ )
scheduler.set_timesteps(self.num_inference_steps )
_lowerCAmelCase : Union[str, Any] = self.dummy_model()
_lowerCAmelCase : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
_lowerCAmelCase : List[Any] = sample.to(snake_case__ )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : str = scheduler.scale_model_input(snake_case__ , snake_case__ )
_lowerCAmelCase : List[str] = model(snake_case__ , snake_case__ )
_lowerCAmelCase : Dict = scheduler.step(snake_case__ , snake_case__ , snake_case__ )
_lowerCAmelCase : Tuple = output.prev_sample
_lowerCAmelCase : List[str] = torch.sum(torch.abs(snake_case__ ) )
_lowerCAmelCase : int = torch.mean(torch.abs(snake_case__ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_8210_4492_1875 ) < 1E-2
assert abs(result_mean.item() - 0.2178_7059_6456_5277 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3521_1181_6406 ) < 1E-2
assert abs(result_mean.item() - 0.2_2342_9068_9229_9652 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1E-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1E-3
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.scheduler_classes[0]
_lowerCAmelCase : Dict = self.get_scheduler_config(prediction_type='v_prediction' )
_lowerCAmelCase : Dict = scheduler_class(**snake_case__ )
scheduler.set_timesteps(self.num_inference_steps )
_lowerCAmelCase : Optional[Any] = self.dummy_model()
_lowerCAmelCase : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma
_lowerCAmelCase : Optional[Any] = sample.to(snake_case__ )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : Optional[int] = scheduler.scale_model_input(snake_case__ , snake_case__ )
_lowerCAmelCase : List[str] = model(snake_case__ , snake_case__ )
_lowerCAmelCase : Optional[int] = scheduler.step(snake_case__ , snake_case__ , snake_case__ )
_lowerCAmelCase : str = output.prev_sample
_lowerCAmelCase : int = torch.sum(torch.abs(snake_case__ ) )
_lowerCAmelCase : Optional[Any] = torch.mean(torch.abs(snake_case__ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_1492_0043_9453 ) < 1E-2
assert abs(result_mean.item() - 0.1_6226_2890_1481_6284 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_6633_6059_5703 ) < 1E-2
assert abs(result_mean.item() - 0.1_6688_3260_0116_7297 ) < 1E-3
else:
assert abs(result_sum.item() - 119.8_4875_4882_8125 ) < 1E-2
assert abs(result_mean.item() - 0.1560_5306_6253_6621 ) < 1E-3
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCAmelCase : str = self.get_scheduler_config()
_lowerCAmelCase : List[Any] = scheduler_class(**snake_case__ )
scheduler.set_timesteps(self.num_inference_steps , device=snake_case__ )
_lowerCAmelCase : Optional[Any] = self.dummy_model()
_lowerCAmelCase : List[Any] = self.dummy_sample_deter.to(snake_case__ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
_lowerCAmelCase : Optional[int] = scheduler.scale_model_input(snake_case__ , snake_case__ )
_lowerCAmelCase : str = model(snake_case__ , snake_case__ )
_lowerCAmelCase : Any = scheduler.step(snake_case__ , snake_case__ , snake_case__ )
_lowerCAmelCase : Tuple = output.prev_sample
_lowerCAmelCase : Tuple = torch.sum(torch.abs(snake_case__ ) )
_lowerCAmelCase : Optional[Any] = torch.mean(torch.abs(snake_case__ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_9573_9746_0938 ) < 1E-2
assert abs(result_mean.item() - 0.2_1805_9346_0798_2635 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3536_3769_5312 ) < 1E-2
assert abs(result_mean.item() - 0.2_2342_9083_8241_5771 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1E-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1E-3
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = self.scheduler_classes[0]
_lowerCAmelCase : Tuple = self.get_scheduler_config()
_lowerCAmelCase : Optional[int] = scheduler_class(**snake_case__ , use_karras_sigmas=snake_case__ )
scheduler.set_timesteps(self.num_inference_steps , device=snake_case__ )
_lowerCAmelCase : Optional[Any] = self.dummy_model()
_lowerCAmelCase : Any = self.dummy_sample_deter.to(snake_case__ ) * scheduler.init_noise_sigma
_lowerCAmelCase : List[Any] = sample.to(snake_case__ )
for t in scheduler.timesteps:
_lowerCAmelCase : str = scheduler.scale_model_input(snake_case__ , snake_case__ )
_lowerCAmelCase : List[str] = model(snake_case__ , snake_case__ )
_lowerCAmelCase : str = scheduler.step(snake_case__ , snake_case__ , snake_case__ )
_lowerCAmelCase : str = output.prev_sample
_lowerCAmelCase : List[str] = torch.sum(torch.abs(snake_case__ ) )
_lowerCAmelCase : List[str] = torch.mean(torch.abs(snake_case__ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_9741_3574_2188 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_6535_6445_3125 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
else:
assert abs(result_sum.item() - 170.3_1352_2338_8672 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
| 719 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
# Lazy-import structure for the TrOCR module.  The mangled version bound the
# dict to a throwaway name and never defined `_import_structure`, so the
# `_LazyModule(...)` call below raised NameError; this restores the canonical
# transformers init pattern (and assigns the lazy module into sys.modules).
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Model classes are only importable when torch is installed.
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 630 | 0 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
# NOTE(review): these four module "constants" are machine-mangled — all are
# bound to the same name `lowerCAmelCase`, so only the final value (1)
# survives.  Upstream they were presumably the label directory, image
# directory, output directory and flip type.  TODO: restore real names.
lowerCAmelCase : List[str] = """"""
lowerCAmelCase : Dict = """"""
lowerCAmelCase : Optional[Any] = """"""
lowerCAmelCase : Union[str, Any] = 1  # (0 is vertical, 1 is horizontal)
def lowercase():
    """Driver: flip every dataset image (and its YOLO-format boxes), then write
    the augmented images and label files to the output directory.

    NOTE(review): this block is machine-mangled and raises NameError as
    written — `__a`, `OUTPUT_DIR`, `paths`, `new_annos`, `annos_list` and the
    helper names (`get_dataset`, `update_image_and_anno`, `random_chars`) are
    not bound under those names in this file (the helpers below are all named
    `lowercase`).  TODO: restore the original identifiers before running.
    """
    _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = get_dataset(__a , __a )
    print('Processing...' )
    _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[Any] = update_image_and_anno(__a , __a , __a )

    for index, image in enumerate(__a ):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        _lowerCAmelCase : List[str] = random_chars(3_2 )
        _lowerCAmelCase : Tuple = paths[index].split(os.sep )[-1].rsplit('.' , 1 )[0]
        _lowerCAmelCase : Dict = f'{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'
        cva.imwrite(f'/{file_root}.jpg' , __a , [cva.IMWRITE_JPEG_QUALITY, 8_5] )
        print(f'Success {index+1}/{len(__a )} with {file_name}' )
        _lowerCAmelCase : List[str] = []
        for anno in new_annos[index]:
            _lowerCAmelCase : List[Any] = f'{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'
            annos_list.append(__a )
        with open(f'/{file_root}.txt' , 'w' ) as outfile:
            outfile.write('\n'.join(line for line in annos_list ) )
def lowercase(label_dir, img_dir):
    """Collect image paths and YOLO-format bounding boxes from a dataset dir.

    For every ``*.txt`` label file in ``label_dir``, parses each line as
    ``class x_center y_center width height`` and pairs it with the matching
    ``<stem>.jpg`` inside ``img_dir``.  Label files with no boxes are skipped.

    Returns (img_paths, labels) — parallel lists.

    Restoration note: the mangled version declared duplicate ``_A`` parameters
    (a SyntaxError) and referenced the undefined names ``__a``/``label_file``;
    this restores the obvious (label_dir, img_dir) pair.
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, '*.txt')):
        label_name = label_file.split(os.sep)[-1].rsplit('.', 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f'{label_name}.jpg')

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip('\n').split(' ')
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ])
        if not boxes:
            continue  # no annotations — skip this image entirely
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def lowercase(img_list, anno_list, flip_type=1):
    """Flip each image and mirror its YOLO boxes accordingly.

    flip_type follows OpenCV's convention: 1 flips horizontally (mirrors the
    x-center), 0 flips vertically (mirrors the y-center).

    Returns (new_imgs_list, new_annos_lists, path_list) — parallel lists.

    Restoration note: the mangled version declared duplicate ``_A`` parameters
    (a SyntaxError); this restores (img_list, anno_list, flip_type=1), keeping
    the file's ``cva`` (OpenCV) alias and the default of 1 unchanged.
    """
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cva.imread(path)
        if flip_type == 1:
            new_image = cva.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]  # mirror x around the vertical axis
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_image = cva.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]  # mirror y around the horizontal axis
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_image)
    return new_imgs_list, new_annos_lists, path_list
def lowercase(number_char=32):
    """Return a random string of lowercase letters and digits of length ``number_char``.

    Restoration note: the mangled version named the parameter ``_A`` while the
    body referenced the undefined ``number_char``; the default (32) is kept.
    """
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
    # NOTE(review): no `main` is bound in this file — the driver above is
    # named `lowercase` — so this raises NameError as written. TODO restore.
    main()
    print("""DONE ✅""")
| 720 |
'''simple docstring'''
from collections import Counter
from timeit import timeit
def lowercase(input_str="", ):
    """True iff the characters of ``input_str`` (spaces ignored,
    case-insensitive) can be rearranged into a palindrome, i.e. at most one
    character occurs an odd number of times.

    Restoration note: the mangled parameter ``_A`` while the body referenced
    the undefined ``input_str``; the default ("") is kept.
    """
    return sum(c % 2 for c in Counter(input_str.replace(' ', '').lower()).values()) < 2
def lowercase(input_str=""):
    """True iff the characters of ``input_str`` (spaces ignored,
    case-insensitive) can be rearranged into a palindrome.

    Manual frequency-count variant of the Counter-based check above: a
    palindrome rearrangement exists when at most one character has an odd
    count.  Restoration note: the mangled parameter ``_A`` while the body
    referenced the undefined ``input_str``; the default ("") is kept.
    """
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(' ', '').lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict[str, int] = {}

    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1

    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
        if odd_char > 1:
            return False  # more than one odd count — impossible
    return True
def lowercase(_A=""):
    """Print a timing comparison of the two palindrome-rearrangement checks.

    NOTE(review): machine-mangled — the functions are referenced by their
    upstream names (`can_string_be_rearranged_as_palindrome_counter`,
    `can_string_be_rearranged_as_palindrome`), which are not bound in this
    file (both implementations above are named `lowercase`), and the timeit
    setup strings expect a module-level `check_str`; as written this raises
    NameError.  TODO: restore the original names.
    """
    print('\nFor string = ' , _A , ':' )
    print(
        '> can_string_be_rearranged_as_palindrome_counter()' , '\tans =' , can_string_be_rearranged_as_palindrome_counter(_A ) , '\ttime =' , timeit(
            'z.can_string_be_rearranged_as_palindrome_counter(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
    print(
        '> can_string_be_rearranged_as_palindrome()' , '\tans =' , can_string_be_rearranged_as_palindrome(_A ) , '\ttime =' , timeit(
            'z.can_string_be_rearranged_as_palindrome(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
if __name__ == "__main__":
    # NOTE(review): `benchmark`, `check_str`, `status` and
    # `can_string_be_rearranged_as_palindrome_counter` are not bound under
    # those names in this file (the defs above are all named `lowercase`),
    # so this driver raises NameError as written.  TODO restore.
    lowerCAmelCase : Tuple = input(
        """Enter string to determine if it can be rearranged as a palindrome or not: """
    ).strip()
    benchmark(check_str)
    lowerCAmelCase : Optional[Any] = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(F'''{check_str} can {"" if status else "not "}be rearranged as a palindrome''')
| 630 | 0 |
'''simple docstring'''
def lowercase(arr):
    """Pancake sort: return a list with the elements of ``arr`` in ascending order.

    Repeatedly finds the maximum of the unsorted prefix, flips it to the
    front, then flips the whole prefix so the maximum lands in its final
    position.  O(n^2) comparisons; note the list is rebuilt (not sorted in
    place).  Restoration note: the mangled version named the parameter ``_A``
    while the body referenced the undefined ``arr``/``cur``/``mi``.
    """
    cur = len(arr)
    while cur > 1:
        # Find the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi (flip the max to the front)
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the whole unsorted prefix (max lands at index cur-1)
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
if __name__ == "__main__":
    lowerCAmelCase : str = input("""Enter numbers separated by a comma:\n""").strip()
    lowerCAmelCase : Any = [int(item) for item in user_input.split(""",""")]
    # NOTE(review): `user_input`, `unsorted` and `pancake_sort` are not bound
    # under those names here (the sort above is named `lowercase`), so this
    # driver raises NameError as written.  TODO restore.
    print(pancake_sort(unsorted))
| 721 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase : str = logging.get_logger(__name__)
lowerCAmelCase : int = {
"""facebook/data2vec-text-base""": """https://huggingface.co/data2vec/resolve/main/config.json""",
}
class UpperCamelCase__(SCREAMING_SNAKE_CASE_):
    """Configuration for a Data2Vec text model (BERT-style hyperparameters).

    Restoration notes: the mangled version declared every ``__init__``
    parameter as ``snake_case__`` (duplicate argument names are a SyntaxError)
    and bound locals instead of instance attributes; parameter names, defaults
    and ``self.*`` assignments are restored, as is the ``model_type`` class
    attribute the config machinery reads.
    """

    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class UpperCamelCase__(SCREAMING_SNAKE_CASE_):
    """ONNX export configuration for Data2Vec text models."""

    # NOTE(review): upstream OnnxConfig subclasses name this property
    # `inputs`; the mangled name `a` is kept to preserve the interface.
    @property
    def a(self):
        """Mapping of model input names to their dynamic (symbolic) axes."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            # Fix: the mangled version never bound `dynamic_axis` (NameError).
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ])
| 630 | 0 |
'''simple docstring'''
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
lowerCAmelCase : str = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
lowerCAmelCase : List[str] = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
lowerCAmelCase : Optional[int] = {"""facebook/blenderbot_small-90M""": 5_12}
def lowercase(word):
    """Return the set of adjacent symbol pairs in ``word``.

    ``word`` is a sequence of symbols (e.g. a tuple of variable-length
    strings during BPE merging, or a plain string of characters).

    Restoration note: the mangled parameter ``_A`` while the body referenced
    the undefined ``word``/``pairs``/``prev_char``.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
class UpperCamelCase__(PreTrainedTokenizer):
    """BlenderbotSmall tokenizer: lowercasing BPE over a vocab.json/merges.txt pair.

    Restoration notes: the mangled version declared duplicate ``snake_case__``
    parameters (a SyntaxError), referenced the undefined ``UpperCAmelCase__``
    for every argument, and bound locals in place of instance attributes.
    Parameter names, ``self.encoder``/``decoder``/``bpe_ranks``/``cache`` and
    the method names the class calls internally (``self.bpe``) plus the
    ``PreTrainedTokenizer`` hook names are restored; the base class is the
    ``PreTrainedTokenizer`` imported above.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            # First line of merges.txt is a version header; last is empty.
            merges = merges_handle.read().split('\n')[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder)

    def get_vocab(self):
        """Full vocabulary including tokens added after loading."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply punctuation splitting, lowercasing and BPE merges to one token."""
        if token in self.cache:
            return self.cache[token]
        token = re.sub('([.,!?()])', R' \1', token)
        token = re.sub('(\')', R' \1 ', token)
        token = re.sub(R'\s{2,}', ' ', token)
        if "\n" in token:
            token = token.replace('\n', ' __newln__')

        tokens = token.split(' ')
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            # Mark the word boundary on the final symbol.
            word = tuple(list(word[:-1]) + [word[-1] + '</w>'])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                # Merge the lowest-ranked (most frequent) pair first.
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))

                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = '@@ '.join(word)
            word = word[:-4]  # drop the trailing "</w>"

            self.cache[token] = word
            words.append(word)
        return " ".join(words)

    def _tokenize(self, text):
        """Split text on whitespace and BPE-encode each piece."""
        split_tokens = []

        words = re.findall(R'\S+\n?', text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(' ')))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Map a (lowercased) token string to its vocabulary id."""
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Map a vocabulary id back to its token string."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Join BPE pieces back into a plain string (strip "@@ " continuations)."""
        out_string = ' '.join(tokens).replace('@@ ', '').strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write vocab.json and merges.txt into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])

        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')

        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ' Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1

        return vocab_file, merge_file
| 700 |
'''simple docstring'''
import pytest
import datasets
# Import fixture modules as plugins
lowerCAmelCase : List[str] = ["""tests.fixtures.files""", """tests.fixtures.hub""", """tests.fixtures.fsspec"""]
def lowercase(config, items):
    """Pytest collection hook: tag every collected test that has no explicit
    ``integration``/``unit`` marker with the ``unit`` marker.

    Restoration note: the mangled version declared duplicate ``_A``
    parameters (a SyntaxError) while the body referenced ``items``.
    """
    for item in items:
        if any(marker in item.keywords for marker in ['integration', 'unit']):
            continue
        item.add_marker(pytest.mark.unit)
def lowercase(config):
    """Pytest configure hook: register the custom ``torchaudio_latest`` marker.

    Restoration note: the mangled parameter ``_A`` while the body referenced
    the undefined ``config``.
    """
    config.addinivalue_line('markers', 'torchaudio_latest: mark test to run with torchaudio>=0.12')
@pytest.fixture(autouse=True)
def lowercase(tmp_path_factory, monkeypatch):
    """Autouse fixture: redirect every ``datasets`` cache path into the pytest
    temp directory so tests never touch the real user cache.

    Restoration notes: the mangled version used the undefined ``_A`` both in
    the decorator (presumably ``autouse=True``) and as duplicate parameter
    names; fixtures receive (tmp_path_factory, monkeypatch) by name.
    """
    test_hf_cache_home = tmp_path_factory.getbasetemp() / 'cache'
    test_hf_datasets_cache = test_hf_cache_home / 'datasets'
    test_hf_metrics_cache = test_hf_cache_home / 'metrics'
    test_hf_modules_cache = test_hf_cache_home / 'modules'
    monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE', str(test_hf_datasets_cache))
    monkeypatch.setattr('datasets.config.HF_METRICS_CACHE', str(test_hf_metrics_cache))
    monkeypatch.setattr('datasets.config.HF_MODULES_CACHE', str(test_hf_modules_cache))

    test_downloaded_datasets_path = test_hf_datasets_cache / 'downloads'
    monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH', str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / 'downloads' / 'extracted'
    monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH', str(test_extracted_datasets_path))
@pytest.fixture(autouse=True, scope='session')
def lowercase():
    """Session-wide autouse fixture: silence ``datasets`` progress bars.

    Restoration note: the mangled decorator argument ``autouse=_A`` referenced
    an undefined name; ``True`` is the only value that makes an autouse
    session fixture meaningful here.
    """
    datasets.disable_progress_bar()
@pytest.fixture(autouse=True)
def lowercase(monkeypatch):
    """Autouse fixture: don't report download counts to the Hub from tests.

    Restoration note: both the decorator argument and the setattr value were
    the undefined ``_A``; presumably ``autouse=True`` and ``False`` for the
    flag — TODO confirm against the upstream conftest.
    """
    monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS', False)
@pytest.fixture
def lowercase(monkeypatch):
    """Fixture: silence SQLAlchemy 2.0 "uber" deprecation warnings.

    Restoration note: the setattr value was the undefined ``_A``; silencing
    requires ``True`` — TODO confirm against the upstream conftest.
    """
    monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING', True)
| 630 | 0 |
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
lowerCAmelCase : str = """scheduler_config.json"""
class UpperCamelCase__(__UpperCAmelCase):
    """Enumeration of scheduler identifiers (values 1-5).

    NOTE(review): machine-mangled — the base `__UpperCAmelCase` is undefined
    here (presumably `enum.Enum`, imported above), and all five members share
    the name `__magic_name__`, so only the last assignment would survive even
    if the base resolved.  TODO: recover the original member names upstream.
    """

    __magic_name__ = 1
    __magic_name__ = 2
    __magic_name__ = 3
    __magic_name__ = 4
    __magic_name__ = 5
@dataclass
class UpperCamelCase__(__UpperCAmelCase):
    """Scheduler step output container.

    NOTE(review): machine-mangled — the base `__UpperCAmelCase` is undefined
    here (presumably `BaseOutput`, imported above), and `__magic_name__ = 42`
    stands in for what was almost certainly a field annotation such as
    `prev_sample: jnp.ndarray`.  TODO: confirm against the upstream source.
    """

    __magic_name__ = 42
class UpperCamelCase__:
    """Base mixin for Flax schedulers: config-driven construction from a
    pretrained location, config saving, and compatible-class listing.

    NOTE(review): this block is machine-mangled.  The method signatures below
    repeat the parameter name `snake_case__` (duplicate argument names are a
    SyntaxError in Python) and the bodies reference the undefined name
    `_lowerCamelCase`; upstream the parameters were presumably
    `pretrained_model_name_or_path`, `subfolder` and `return_unused_kwargs`.
    TODO: restore from the original source before use.
    """

    # Mangled class attributes — upstream presumably `config_name`,
    # `ignore_for_config`, `_compatibles` and `has_compatibles`; only the
    # last `__magic_name__` binding survives as written.
    __magic_name__ = SCHEDULER_CONFIG_NAME
    __magic_name__ = ["dtype"]
    __magic_name__ = []
    __magic_name__ = True

    @classmethod
    def a(cls, snake_case__=None, snake_case__=None, snake_case__=False, **snake_case__, ):
        '''Instantiate a scheduler (and its state, when it keeps one) from a saved config.'''
        _lowerCAmelCase , _lowerCAmelCase : Dict = cls.load_config(
            pretrained_model_name_or_path=_lowerCamelCase , subfolder=_lowerCamelCase , return_unused_kwargs=_lowerCamelCase , **_lowerCamelCase , )
        _lowerCAmelCase , _lowerCAmelCase : Tuple = cls.from_config(_lowerCamelCase , return_unused_kwargs=_lowerCamelCase , **_lowerCamelCase )

        # Stateful schedulers expose create_state()/has_state.
        if hasattr(_lowerCamelCase , 'create_state' ) and getattr(_lowerCamelCase , 'has_state' , _lowerCamelCase ):
            _lowerCAmelCase : str = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def a(self, snake_case__, snake_case__=False, **snake_case__):
        '''Persist the scheduler config (optionally pushing to the Hub).'''
        self.save_config(save_directory=_lowerCamelCase , push_to_hub=_lowerCamelCase , **_lowerCamelCase )

    @property
    def a(self):
        '''List of scheduler classes this one is interchangeable with.'''
        return self._get_compatibles()

    @classmethod
    def a(cls):
        '''Resolve the compatible class names (plus this class) to classes in the package.'''
        _lowerCAmelCase : List[Any] = list(set([cls.__name__] + cls._compatibles ) )
        _lowerCAmelCase : List[str] = importlib.import_module(__name__.split('.' )[0] )
        _lowerCAmelCase : Dict = [
            getattr(_lowerCamelCase , _lowerCamelCase ) for c in compatible_classes_str if hasattr(_lowerCamelCase , _lowerCamelCase )
        ]
        return compatible_classes
def lowercase(x, shape):
    """Broadcast ``x`` to ``shape``, aligning x's axes on the LEFT.

    Appends trailing singleton axes to ``x`` until its rank matches
    ``len(shape)``, then broadcasts — the opposite alignment of NumPy's
    default (right-aligned) broadcasting.

    Restoration note: the mangled version declared duplicate ``_A``
    parameters (a SyntaxError) while the body referenced ``x`` and the
    undefined ``_lowerCAmelCase`` (presumably ``shape``).
    """
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)
def lowercase (_A , _A=0.999 , _A=jnp.floataa ):
"""simple docstring"""
def alpha_bar(_A ):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2
_lowerCAmelCase : Dict = []
for i in range(_lowerCAmelCase ):
_lowerCAmelCase : Any = i / num_diffusion_timesteps
_lowerCAmelCase : str = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(_lowerCAmelCase ) / alpha_bar(_lowerCAmelCase ) , _lowerCAmelCase ) )
return jnp.array(_lowerCAmelCase , dtype=_lowerCAmelCase )
@flax.struct.dataclass
class UpperCamelCase__:
    """Immutable container for the beta/alpha schedule shared by Flax schedulers.

    NOTE(review): machine-mangled — the three `__magic_name__ = 42` lines
    stand in for what upstream were field annotations (presumably
    `alphas: jnp.ndarray`, `betas: jnp.ndarray`, `alphas_cumprod: jnp.ndarray`),
    and the classmethod body references the undefined `_lowerCamelCase`.
    TODO: restore from the original source before use.
    """

    __magic_name__ = 42
    __magic_name__ = 42
    __magic_name__ = 42

    @classmethod
    def a(cls, snake_case__):
        '''Build the schedule state from a scheduler's config (beta-schedule selection).'''
        _lowerCAmelCase : Any = scheduler.config

        if config.trained_betas is not None:
            # A caller-supplied beta table takes precedence over any formula.
            _lowerCAmelCase : Optional[int] = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
        elif config.beta_schedule == "linear":
            _lowerCAmelCase : List[str] = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            _lowerCAmelCase : Tuple = (
                jnp.linspace(
                    config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            _lowerCAmelCase : List[Any] = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
        else:
            raise NotImplementedError(
                F'beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}' )

        _lowerCAmelCase : int = 1.0 - betas

        _lowerCAmelCase : Tuple = jnp.cumprod(_lowerCamelCase , axis=0 )

        return cls(
            alphas=_lowerCamelCase , betas=_lowerCamelCase , alphas_cumprod=_lowerCamelCase , )
def lowercase(state, original_samples, noise, timesteps):
    """Gather sqrt(alpha_cumprod) and sqrt(1 - alpha_cumprod) at ``timesteps``
    and broadcast both to the shape of ``original_samples``.

    ``noise`` is unused here but kept for signature parity with the two
    callers below.  Restoration note: the mangled version declared duplicate
    ``_A`` parameters (a SyntaxError) while the body referenced ``state``,
    ``timesteps`` and ``original_samples``.
    """
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def lowercase(state, original_samples, noise, timesteps):
    """Forward-diffuse clean samples: sqrt(abar_t)*x0 + sqrt(1-abar_t)*noise.

    Restoration note: the mangled version declared duplicate ``_A``
    parameters (a SyntaxError); this restores the canonical
    (state, original_samples, noise, timesteps) signature the body uses.
    """
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples
def lowercase(state, sample, noise, timesteps):
    """Compute the v-prediction target: sqrt(abar_t)*noise - sqrt(1-abar_t)*x.

    Restoration note: the mangled version declared duplicate ``_A``
    parameters (a SyntaxError); the body references ``sample``, fixing the
    signature to (state, sample, noise, timesteps).
    """
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
| 701 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
lowerCAmelCase : str = logging.get_logger(__name__)
# General docstring
lowerCAmelCase : Optional[Any] = """RegNetConfig"""
# Base docstring
lowerCAmelCase : int = """facebook/regnet-y-040"""
lowerCAmelCase : Optional[Any] = [1, 10_88, 7, 7]
# Image classification docstring
lowerCAmelCase : Any = """facebook/regnet-y-040"""
lowerCAmelCase : Optional[Any] = """tabby, tabby cat"""
lowerCAmelCase : Tuple = [
"""facebook/regnet-y-040""",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class UpperCamelCase__(tf.keras.layers.Layer):
    """Conv + BatchNorm + activation block with explicit zero padding.

    Restoration notes: the mangled version declared duplicate ``snake_case__``
    parameters (a SyntaxError), used the non-existent keras attributes
    ``ZeroPaddingaD``/``ConvaD`` (presumably ZeroPadding2D/Conv2D) and named
    the forward pass ``a`` instead of the keras ``call`` hook.
    """

    def __init__(self, out_channels, kernel_size=3, stride=1, groups=1, activation="relu", **kwargs):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding='VALID',
            groups=groups,
            use_bias=False,
            name='convolution',
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name='normalization')
        self.activation = ACTaFN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class UpperCamelCase__(tf.keras.layers.Layer):
    """RegNet stem: channel check, NCHW->NHWC transpose, first conv embedding.

    Restoration notes: the mangled version declared ``snake_case__`` both as a
    positional and as the ``**`` parameter (a SyntaxError) and named the
    forward pass ``a`` instead of the keras ``call`` hook.
    """

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name='embedder',
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                'Make sure that the channel dimension of the pixel values match with the one set in the configuration.')

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class UpperCamelCase__(tf.keras.layers.Layer):
    """1x1 strided conv + batch-norm used to project residual shortcut branches.

    Restoration notes: duplicate ``snake_case__`` parameters (a SyntaxError)
    are restored to (out_channels, stride=2) and (inputs, training=False),
    the non-existent ``ConvaD`` to Conv2D, and the forward hook to ``call``.
    """

    def __init__(self, out_channels, stride=2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name='convolution')
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name='normalization')

    def call(self, inputs, training=False):
        return self.normalization(self.convolution(inputs), training=training)
class UpperCamelCase__(tf.keras.layers.Layer):
    """Squeeze-and-Excitation block: global pool, bottleneck convs, re-weighting.

    Restoration notes: duplicate ``snake_case__`` parameters (a SyntaxError)
    are restored to (in_channels, reduced_channels), the non-existent
    ``GlobalAveragePoolingaD``/``ConvaD`` to their 2D keras layers, and the
    forward hook to ``call``; the pooled attention path is threaded through
    the two convs before scaling the input.
    """

    def __init__(self, in_channels, reduced_channels, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name='pooler')
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation='relu', name='attention.0'),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation='sigmoid', name='attention.2'),
        ]

    def call(self, hidden_state):
        # [batch, 1, 1, channels] channel descriptor
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
class UpperCamelCase__ ( tf.keras.layers.Layer ):
    """RegNet X residual layer: 1x1 conv -> grouped 3x3 conv -> 1x1 conv,
    with an identity or projection shortcut added before the activation.

    NOTE(review): the original declared duplicate `snake_case__` parameters
    (SyntaxError), never bound `self.shortcut` / `self.layers` /
    `self.activation`, and read undefined `residual` / `hidden_state`
    locals; consistent names are restored here.
    """

    def __init__(self, config, in_channels, out_channels, stride=1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name='shortcut')
            if should_apply_shortcut
            else tf.keras.layers.Activation('linear', name='shortcut')
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name='layer.0'),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name='layer.1'),
            # final conv carries no activation; it is applied after the residual add
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name='layer.2'),
        ]
        self.activation = ACTaFN[config.hidden_act]  # mangled `ACT2FN` activation table (module-level)

    def a(self, hidden_state):
        """Apply the three conv stages, add the shortcut, then activate."""
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class UpperCamelCase__ ( tf.keras.layers.Layer ):
    """RegNet Y residual layer: an X layer with a Squeeze-and-Excitation
    block inserted before the final 1x1 convolution.

    NOTE(review): the original declared duplicate `snake_case__` parameters
    (SyntaxError), never bound `self.shortcut` / `self.layers` /
    `self.activation`, and read undefined locals; restored.
    """

    def __init__(self, config, in_channels, out_channels, stride=1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name='shortcut')
            if should_apply_shortcut
            else tf.keras.layers.Activation('linear', name='shortcut')
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name='layer.0'),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name='layer.1'),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name='layer.2'),
            # final conv carries no activation; it is applied after the residual add
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name='layer.3'),
        ]
        self.activation = ACTaFN[config.hidden_act]  # mangled `ACT2FN` activation table (module-level)

    def a(self, hidden_state):
        """Conv stages + SE attention, add the shortcut, then activate."""
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class UpperCamelCase__ ( tf.keras.layers.Layer ):
    """One RegNet stage: `depth` X- or Y-layers; only the first layer may
    downsample (stride).

    NOTE(review): the original declared duplicate `snake_case__` parameters
    (SyntaxError), never bound `self.layers`, and read an undefined
    `hidden_state` local; restored. `TFRegNetXLayer` / `TFRegNetYLayer`
    are referenced as in the original — in this mangled file the matching
    class definitions were renamed, so confirm those bindings exist.
    """

    def __init__(self, config, in_channels, out_channels, stride=2, depth=2, **kwargs):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name='layers.0'),
            *[layer(config, out_channels, out_channels, name=F'layers.{i+1}') for i in range(depth - 1)],
        ]

    def a(self, hidden_state):
        """Run the input through every layer of the stage sequentially."""
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class UpperCamelCase__ ( tf.keras.layers.Layer ):
    """RegNet encoder: the sequence of stages following the embedding stem.

    NOTE(review): the original bound the stage list and the channel-pair
    iterator to the throwaway `_lowerCAmelCase` name while later statements
    read `self.stages` and the pair variable (NameError); restored.
    """

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name='stages.0',
            ) )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=F'stages.{i+1}'))

    def a(self, hidden_state, output_hidden_states=False, return_dict=True):
        """Run all stages, optionally collecting every intermediate state."""
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class UpperCamelCase__ ( tf.keras.layers.Layer ):
    """Core RegNet main layer: embedder -> encoder -> global average pooler.

    Returns NCHW tensors for uniformity with the PyTorch modules.

    NOTE(review): the original never bound `self.config` / `self.embedder` /
    `self.encoder` / `self.pooler` (everything went to `_lowerCAmelCase`),
    used the nonexistent `GlobalAveragePoolingaD`, and transposed the wrong
    tensor inside the hidden-states comprehension; restored. The class
    attribute is renamed from the mangled `__magic_name__` to
    `config_class`, which `keras_serializable` reads.
    """

    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name='embedder')
        self.encoder = TFRegNetEncoder(config, name='encoder')
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name='pooler')

    @unpack_inputs
    def a(self, pixel_values, output_hidden_states=None, return_dict=None, training=False):
        """Full forward pass; see class docstring for the output layout."""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values, training=training)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        # Change to NCHW output format to have uniformity in the modules
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple(tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1])
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """RegNet pre-trained model stub: config class, weight prefix, and the
    serving input signature.

    NOTE(review): the original assigned three different values to the single
    attribute `__magic_name__`, so only the last survived; restored to the
    conventional transformers attribute names read by the base class.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def a(self):
        """Dummy serving signature: NCHW float32 images of size 224x224."""
        # `tf.floataa` in the original is a mangled `tf.float32` (no such attribute).
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
lowerCAmelCase : List[Any] = r"""
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
lowerCAmelCase : Dict = r"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConveNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top." , SCREAMING_SNAKE_CASE_ , )
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """Bare RegNet model: wraps the main layer and returns raw features.

    NOTE(review): the original passed the undefined `snake_case__` to the
    forward decorators (NameError at class-creation time); replaced with
    the module-level inputs docstring (bound to `lowerCAmelCase` at this
    point in the file) and the actual output type.
    """

    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name='regnet')

    @unpack_inputs
    @add_start_docstrings_to_model_forward(lowerCAmelCase)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=TFBaseModelOutputWithPoolingAndNoAttention , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def a(self, pixel_values, output_hidden_states=None, return_dict=None, training=False):
        """Delegate to the main layer and repackage the outputs."""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values=pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, )
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )
@add_start_docstrings(
    "\n    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    " , SCREAMING_SNAKE_CASE_ , )
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """RegNet with an image-classification head (flatten + dense) on top.

    NOTE(review): the original listed the same base class twice (a TypeError
    at class creation — upstream mixes in a separate loss class; confirm
    the intended second base) and passed undefined `snake_case__` to the
    forward decorators; both repaired, locals restored.
    """

    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name='regnet')
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name='classifier.1') if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(lowerCAmelCase)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=TFSequenceClassifierOutput , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def a(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None, training=False):
        """Forward pass; computes a classification loss when `labels` is given."""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        flattened = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened)
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
# | 630 | 0 |  (dataset separator row — not code; commented out so the file parses)
"""Lazy import structure for the GPT-NeoX Japanese model.

NOTE(review): the original bound the import-structure dict and the modeling
symbol list to throwaway names while `_LazyModule` below received the
undefined `_import_structure` (NameError), and the lazy module was never
installed in `sys.modules`; the standard transformers pattern is restored.
"""
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable

# Submodule -> public symbols map consumed by `_LazyModule`.
_import_structure = {
    'configuration_gpt_neox_japanese': ['GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXJapaneseConfig'],
    'tokenization_gpt_neox_japanese': ['GPTNeoXJapaneseTokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: skip registering the modeling symbols.
    pass
else:
    _import_structure['modeling_gpt_neox_japanese'] = [
        'GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GPTNeoXJapaneseForCausalLM',
        'GPTNeoXJapaneseLayer',
        'GPTNeoXJapaneseModel',
        'GPTNeoXJapanesePreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
    from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox_japanese import (
            GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXJapaneseForCausalLM,
            GPTNeoXJapaneseLayer,
            GPTNeoXJapaneseModel,
            GPTNeoXJapanesePreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules import on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# | 702 |  (dataset separator row — not code; commented out so the file parses)
'''simple docstring'''
from typing import Any
def lowercase (_A ):
    """Return the mode(s) of the input sequence as a sorted list.

    Counts each value's occurrences and returns every value that attains the
    maximum count; an empty input yields an empty list.

    NOTE(review): the original body read the undefined names `input_list`
    and `y`, raising NameError on any call; consistent locals restored.

    >>> lowercase([1, 2, 2, 3, 3])
    [2, 3]
    >>> lowercase([])
    []
    """
    input_list = _A
    if not input_list:
        return []
    counts = [input_list.count(value) for value in input_list]
    max_count = max(counts)  # highest occurrence count in the input
    # Set comprehension de-duplicates values that appear `max_count` times.
    return sorted({input_list[i] for i, count in enumerate(counts) if count == max_count})
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
# | 630 | 0 |  (dataset separator row — not code; commented out so the file parses)
"""Top-level `datasets` package initialisation (mangled copy).

NOTE(review): `lowerCAmelCase` below is presumably the mangled
`__version__` attribute — confirm against upstream before relying on it.
"""
lowerCAmelCase : Optional[int] = "2.13.1"
import platform
import pyarrow
from packaging import version
# Hard-fail early on an unsupported interpreter version.
if version.parse(platform.python_version()) < version.parse("""3.7"""):
    raise ImportWarning(
        """To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
    )
# Hard-fail early on an unsupported pyarrow version.
if version.parse(pyarrow.__version__).major < 8:
    raise ImportWarning(
        """To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
        """If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
    )
# Remove the version-check helpers from the package namespace.
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset  # isort:skip
from datasets import utils as _utils  # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager  # isort:skip

# NOTE(review): the original rebound a single throwaway module-level name
# seven times, so none of the backward-compatibility aliases were actually
# installed; restored to attribute assignments on the legacy modules so old
# import paths (e.g. `datasets.utils.DownloadConfig`) keep working.
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager

del _arrow_dataset, _utils, _deprecated_download_manager
# | 703 |  (dataset separator row — not code; commented out so the file parses)
'''simple docstring'''
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
# | 630 | 0 |  (dataset separator row — not code; commented out so the file parses)
# Trailing website residue (not code); commented out so the file parses:
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.