code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
    BertIntermediate,
    BertLayer,
    BertOutput,
    BertSelfAttention,
    BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
    raise Exception("requires fairseq >= 1.0.0a")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"
def convert_xlm_roberta_xl_checkpoint_to_pytorch(
    roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    """
    Copy/paste/tweak roberta's weights to our BERT structure.
    """
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.

    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )

        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
a__ = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 235 |
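# Illustrative only: one way to drive the converter defined above from Python
# rather than through argparse. Both paths are hypothetical placeholders; a real
# run needs a fairseq checkpoint directory containing model.pt and dict.txt.
convert_xlm_roberta_xl_checkpoint_to_pytorch(
    roberta_checkpoint_path="path/to/xlm-roberta-xl",  # placeholder input dir
    pytorch_dump_folder_path="path/to/output",  # placeholder output dir
    classification_head=False,  # True only for MNLI-finetuned checkpoints
)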
from math import ceil
def solution(n: int = 1_001) -> int:
    """Return the sum of the numbers on the diagonals of an n x n spiral grid (Project Euler 28)."""
    total = 1

    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even

    return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
a__ = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number''')
| 235 | 1 |
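# Sanity check for solution() above (not part of the original file): each ring of
# the spiral has odd side length s and corner values s**2 - j*(s - 1) for j in
# 0..3, so the per-ring contribution is 4*s**2 - 6*(s - 1), the update used above.
def diagonal_sum_bruteforce(n: int) -> int:
    total = 1
    for s in range(3, n + 1, 2):  # side lengths of successive rings
        total += sum(s * s - j * (s - 1) for j in range(4))
    return total


assert diagonal_sum_bruteforce(5) == 101  # 1 + 3+5+7+9 + 13+17+21+25
assert diagonal_sum_bruteforce(1_001) == solution(1_001)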
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph):
    """Check whether the graph (given as an adjacency list) is 2-colorable."""
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
| 366 |
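# Quick negative/positive cases for check_bipartite_dfs() (illustrative, not from
# the original file): a triangle contains an odd cycle, so no 2-coloring exists,
# while any even cycle is 2-colorable.
assert check_bipartite_dfs({0: [1, 2], 1: [0, 2], 2: [0, 1]}) is False  # 3-cycle
assert check_bipartite_dfs({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]}) is True  # 4-cycle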
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mgp_str"] = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]


if TYPE_CHECKING:
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 59 | 0 |
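# For context, a stripped-down sketch of the lazy-import idea used above, written
# independently of transformers' _LazyModule (the class and attribute names here
# are illustrative assumptions, not the library's API):
import importlib
import types


class MiniLazyModule(types.ModuleType):
    """Resolve exported attributes on first access instead of at import time."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map attribute -> submodule, e.g. "MgpstrConfig" -> "configuration_mgp_str"
        self._attr_to_module = {a: mod for mod, attrs in import_structure.items() for a in attrs}

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        module = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups are plain attribute hits
        return value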
from ...processing_utils import ProcessorMixin


class TvltProcessor(ProcessorMixin):
    r"""
    Constructs a TVLT processor which wraps a TVLT image processor and a TVLT feature extractor into a single
    processor.
    """

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)

        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
| 1 |
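# One detail worth noting in TvltProcessor.model_input_names above: dict.fromkeys
# dedupes while preserving order. A standalone illustration (the names are made up):
image_names = ["pixel_values", "pixel_mask"]
audio_names = ["audio_values", "audio_mask", "pixel_mask"]  # deliberate overlap
merged = list(dict.fromkeys(image_names + audio_names))
assert merged == ["pixel_values", "pixel_mask", "audio_values", "audio_mask"]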
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance


graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 270 | 0 |
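# Expected behavior on the example graphs above (illustrative checks, not from the
# original file): the only E->F paths are E->B->C->D->F (cost 4) and E->G->F
# (cost 3), so the bidirectional search should meet in the middle and return 3.
assert bidirectional_dij("E", "F", graph_fwd, graph_bwd) == 3
assert bidirectional_dij("E", "E", graph_fwd, graph_bwd) == 0  # source == destination
assert bidirectional_dij("F", "G", graph_fwd, graph_bwd) == -1  # unreachable -> sentinel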
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }


@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "clusters"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass


def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    images = [image1, image2]

    return images


@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")

        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))

        expected_slice = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_slice)

        # test batched
        encoding = image_processing(images, return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))

        expected_slice = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_slice)
| 41 |
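# The "clusters" in the tests above are ImageGPT's color codebook: every pixel is
# replaced by the index of its nearest centroid, which is why the processor emits
# integer input_ids instead of pixel tensors. A minimal sketch of that quantization
# step (an illustration, not the library's exact implementation):
import numpy as np


def nearest_cluster_ids(pixels: np.ndarray, clusters: np.ndarray) -> np.ndarray:
    """pixels: (n, 3) normalized colors; clusters: (k, 3) codebook -> (n,) ids."""
    d2 = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(-1)  # (n, k) sq. dists
    return d2.argmin(axis=1)


toy_clusters = np.array([[0.9, 0.7, 0.4], [-0.6, 0.0, 0.5]])  # toy 2-color codebook
toy_pixels = np.array([[0.8, 0.6, 0.3], [-0.5, 0.1, 0.6]])
assert nearest_cluster_ids(toy_pixels, toy_clusters).tolist() == [0, 1]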
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50_000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache


class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 41 | 1 |
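# Small sanity check of the embedding_size fallback in RoFormerConfig.__init__
# above (runnable only where transformers is installed):
from transformers import RoFormerConfig

config = RoFormerConfig(hidden_size=384)
assert config.embedding_size == 384  # defaults to hidden_size

config = RoFormerConfig(hidden_size=384, embedding_size=128)
assert (config.embedding_size, config.hidden_size) == (128, 384)  # decoupled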
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
    from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor
class ViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as ViT does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_interpolate_pos_encoding(self):
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # allowing the pre-trained position embeddings to be interpolated in order to use
        # the model on higher resolutions.
        model = ViTModel.from_pretrained("facebook/dino-vits8").to(torch_device)

        image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)

        # verify the logits
        expected_shape = torch.Size((1, 3601, 384))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = ViTModel.from_pretrained("facebook/dino-vits8", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
| 71 |
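# The tester above wires image_size=30 and patch_size=2, so the sequence length is
# (30 // 2) ** 2 + 1 = 226; the same arithmetic gives the familiar 197 tokens for
# a 224-pixel ViT with 16-pixel patches. A self-contained check:
def vit_seq_length(image_size: int, patch_size: int) -> int:
    return (image_size // patch_size) ** 2 + 1  # patches plus one [CLS] token


assert vit_seq_length(30, 2) == 226  # the test configuration above
assert vit_seq_length(224, 16) == 197  # standard ViT-Base input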
values = {
    0: "0",
    1: "1",
    2: "2",
    3: "3",
    4: "4",
    5: "5",
    6: "6",
    7: "7",
    8: "8",
    9: "9",
    10: "a",
    11: "b",
    12: "c",
    13: "d",
    14: "e",
    15: "f",
}


def decimal_to_hexadecimal(decimal) -> str:
    """Take an integer decimal value and return its hexadecimal representation as a string."""
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 71 | 1 |
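# Cross-check of decimal_to_hexadecimal() against the built-in hex() (illustrative;
# zero is skipped because the digit loop above only runs for positive magnitudes):
for n in (5, 17, 255, 256, -256, 3_735_928_559):
    assert decimal_to_hexadecimal(n) == hex(n), n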
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> YolosConfig:
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def read_in_q_k_v(state_dict: dict, config: YolosConfig, base_model: bool = False) -> None:
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(name: str) -> str:
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")

    return name
def convert_state_dict(orig_state_dict: dict, model: YolosForObjectDetection) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_img() -> torch.Tensor:
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(
    yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False
) -> None:
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
UpperCamelCase = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 334 |
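# Both read_in_q_k_v and convert_state_dict above rely on the same timm-style
# layout: a fused (3*h, h) qkv projection stacked as query, key, value. A
# standalone demonstration of the slicing (torch only, no model required):
import torch

h = 4  # toy hidden size
qkv = torch.arange(3 * h * h, dtype=torch.float32).reshape(3 * h, h)

q = qkv[:h, :]  # rows 0 .. h-1
k = qkv[h : 2 * h, :]  # rows h .. 2h-1
v = qkv[-h:, :]  # rows 2h .. 3h-1

assert torch.equal(torch.cat([q, k, v], dim=0), qkv)  # the slices partition exactly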
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "[UNK]",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], token_type_ids)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]

            # fmt: off
            expected_encoding = {
                'input_ids': [
                    [1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
                ],
                'token_type_ids': [
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                ],
                'attention_mask': [
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
                ]
            }
            # fmt: on

            expected_decoded_sequence = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(expected, decoded)
| 334 | 1 |
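# The toy vocabulary in setUp() above is small enough to trace by hand. A greedy
# re-implementation of its BPE merges (illustrative; the real tokenizer also does
# byte-level pretokenization, which is skipped here):
toy_merges = [("\u0120", "l"), ("\u0120l", "o"), ("\u0120lo", "w"), ("e", "r")]
toy_ranks = {pair: i for i, pair in enumerate(toy_merges)}


def toy_bpe(word):
    symbols = list(word)
    while True:
        candidates = [
            (toy_ranks.get(pair, float("inf")), i)
            for i, pair in enumerate(zip(symbols, symbols[1:]))
        ]
        rank, i = min(candidates, default=(float("inf"), -1))
        if rank == float("inf"):
            return symbols
        symbols[i : i + 2] = ["".join(symbols[i : i + 2])]


assert toy_bpe("lower") == ["l", "o", "w", "er"]
assert toy_bpe("\u0120newer") == ["\u0120", "n", "e", "w", "er"]  # "Ġ" marks a space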
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
    import jax

    from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel

if is_vision_available():
    from PIL import Image

    from transformers import BeitImageProcessor
class snake_case ( unittest.TestCase ):
def __init__( self : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple=1_0_0 , UpperCamelCase__ : int=1_3 , UpperCamelCase__ : Union[str, Any]=3_0 , UpperCamelCase__ : Optional[int]=2 , UpperCamelCase__ : Any=3 , UpperCamelCase__ : Any=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : str=3_2 , UpperCamelCase__ : List[str]=5 , UpperCamelCase__ : int=4 , UpperCamelCase__ : List[Any]=3_7 , UpperCamelCase__ : int="gelu" , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : str=0.1 , UpperCamelCase__ : List[str]=1_0 , UpperCamelCase__ : Optional[int]=0.02 , UpperCamelCase__ : Dict=3 , )-> List[str]:
'''simple docstring'''
__lowerCAmelCase: Optional[int] = parent
__lowerCAmelCase: Optional[Any] = vocab_size
__lowerCAmelCase: str = batch_size
__lowerCAmelCase: Tuple = image_size
__lowerCAmelCase: Any = patch_size
__lowerCAmelCase: Union[str, Any] = num_channels
__lowerCAmelCase: int = is_training
__lowerCAmelCase: Optional[Any] = use_labels
__lowerCAmelCase: List[Any] = hidden_size
__lowerCAmelCase: Optional[int] = num_hidden_layers
__lowerCAmelCase: Union[str, Any] = num_attention_heads
__lowerCAmelCase: Any = intermediate_size
__lowerCAmelCase: Union[str, Any] = hidden_act
__lowerCAmelCase: str = hidden_dropout_prob
__lowerCAmelCase: List[Any] = attention_probs_dropout_prob
__lowerCAmelCase: Optional[int] = type_sequence_label_size
__lowerCAmelCase: int = initializer_range
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowerCAmelCase: Optional[Any] = (image_size // patch_size) ** 2
__lowerCAmelCase: Dict = num_patches + 1
def lowercase_ ( self : Optional[Any])-> List[Any]:
'''simple docstring'''
__lowerCAmelCase: str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
__lowerCAmelCase: Any = None
if self.use_labels:
__lowerCAmelCase: Any = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__lowerCAmelCase: Any = BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , )
return config, pixel_values, labels
def lowercase_ ( self : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str])-> Optional[int]:
'''simple docstring'''
__lowerCAmelCase: Tuple = FlaxBeitModel(config=UpperCamelCase__)
__lowerCAmelCase: List[str] = model(UpperCamelCase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def lowercase_ ( self : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple)-> Optional[int]:
'''simple docstring'''
__lowerCAmelCase: List[Any] = FlaxBeitForMaskedImageModeling(config=UpperCamelCase__)
__lowerCAmelCase: Union[str, Any] = model(UpperCamelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size))
def lowercase_ ( self : str , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : str)-> List[str]:
'''simple docstring'''
__lowerCAmelCase: Optional[Any] = self.type_sequence_label_size
__lowerCAmelCase: Tuple = FlaxBeitForImageClassification(config=UpperCamelCase__)
__lowerCAmelCase: int = model(UpperCamelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
__lowerCAmelCase: List[str] = 1
__lowerCAmelCase: List[str] = FlaxBeitForImageClassification(UpperCamelCase__)
__lowerCAmelCase: Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
__lowerCAmelCase: Union[str, Any] = model(UpperCamelCase__)
def lowercase_ ( self : Any)-> Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase: Optional[Any] = self.prepare_config_and_inputs()
(
(
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) ,
): str = config_and_inputs
__lowerCAmelCase: Optional[int] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_flax
class snake_case ( __snake_case, unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : List[str] = (
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
def lowercase_ ( self : Union[str, Any])-> None:
'''simple docstring'''
__lowerCAmelCase: int = FlaxBeitModelTester(self)
__lowerCAmelCase: Dict = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=3_7)
def lowercase_ ( self : Tuple)-> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self : List[str])-> str:
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase: Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase: Any = model_class(UpperCamelCase__)
__lowerCAmelCase: Tuple = inspect.signature(model.__call__)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase: Optional[Any] = [*signature.parameters.keys()]
__lowerCAmelCase: Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCamelCase__)
def lowercase_ ( self : Dict)-> Any:
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase: List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
__lowerCAmelCase: Union[str, Any] = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__)
__lowerCAmelCase: Dict = model_class(UpperCamelCase__)
@jax.jit
def model_jitted(UpperCamelCase__ : int , **UpperCamelCase__ : str):
return model(pixel_values=UpperCamelCase__ , **UpperCamelCase__)
with self.subTest("JIT Enabled"):
__lowerCAmelCase: int = model_jitted(**UpperCamelCase__).to_tuple()
with self.subTest("JIT Disabled"):
with jax.disable_jit():
__lowerCAmelCase: Optional[Any] = model_jitted(**UpperCamelCase__).to_tuple()
self.assertEqual(len(UpperCamelCase__) , len(UpperCamelCase__))
for jitted_output, output in zip(UpperCamelCase__ , UpperCamelCase__):
self.assertEqual(jitted_output.shape , output.shape)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("microsoft/beit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@require_flax
class FlaxBeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="np").pixel_values

        # prepare bool_masked_pos
        bool_masked_pos = np.ones((1, 196), dtype=bool)

        # forward pass
        outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 196, 8192)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        )
        self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([-1.2385, -1.0987, -1.0108])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 21841)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([1.6881, -0.2787, 0.5901])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
| 217 |
"""simple docstring"""
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
    """Checks whether n uses each of the digits 1 through 9 exactly once."""
    digits = str(n)
    return len(digits) == 9 and set(digits) == set("123456789")


def solution() -> int | None:
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate

    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate

    return None
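# Why the two constants work (added note): for a 4-digit n, concatenating n
# with 2*n (a 5-digit number) gives n * 10**5 + 2*n == 100002 * n, and for a
# 3-digit n, concatenating n, 2*n and 3*n (each 3 digits) gives
# n * 10**6 + 2*n * 10**3 + 3*n == 1002003 * n. A quick sanity check:
#
#     >>> 100002 * 9327 == int(str(9327) + str(2 * 9327))
#     True
#     >>> 1002003 * 327 == int(str(327) + str(2 * 327) + str(3 * 327))
#     True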
if __name__ == "__main__":
    print(f"{solution() = }")
| 217 | 1 |
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
        MODEL_MAPPING,
        LevitForImageClassification,
        LevitForImageClassificationWithTeacher,
        LevitModel,
    )
    from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import LevitImageProcessor
class LevitConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))


class LevitModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 6, 8],
        depths=[2, 3, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return LevitConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding,
            patch_size=self.patch_size,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            depths=self.depths,
            key_dim=self.key_dim,
            drop_path_rate=self.drop_path_rate,
            mlp_ratio=self.mlp_ratio,
            attention_ratio=self.attention_ratio,
            initializer_range=self.initializer_range,
            down_ops=self.down_ops,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
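    # Note (added for clarity): each of the four stem convolutions checked in
    # `create_and_check_model` uses the standard output-size formula
    # out = floor((in + 2*padding - kernel) / stride) + 1, so a 64x64 input
    # shrinks 64 -> 32 -> 16 -> 8 -> 4; the two "Subsample" stages then divide
    # each side by another overall factor of 4, which is where the
    # ceil(height / 4) * ceil(width / 4) sequence length in the assertion
    # comes from.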
@require_torch
class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Levit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Levit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Levit does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depths) + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height, width = image_size[0], image_size[1]
            for _ in range(4):
                height = floor(
                    ((height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                     / self.model_tester.stride)
                    + 1
                )
                width = floor(
                    ((width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                     / self.model_tester.stride)
                    + 1
                )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [height * width, self.model_tester.hidden_sizes[0]],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the PyTorch warning "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please
                    # ensure they have the same size.", which is a symptom that something is wrong for the
                    # regression problem. See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()
    @slow
    def test_model_from_pretrained(self):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class LevitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 130 |
from __future__ import annotations
import requests
valid_terms = set(
'''approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports'''.split()
)
def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}

    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
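# Sketch of one way to cope with the 429 rate limit raised above (added for
# illustration; not part of the original script):
#
#     import time
#
#     for attempt in range(3):
#         try:
#             posts = get_subreddit_data("learnpython", wanted_data=["title"])
#             break
#         except requests.HTTPError:
#             time.sleep(2**attempt)  # exponential backoff before retrying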
if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try again after some time.
    print(get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"]))
| 130 | 1 |
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
    import bitsandbytes as bnb
    import torch
    import torch.nn as nn

    from ..pytorch_utils import Conv1D

if is_accelerate_available():
    from accelerate import init_empty_weights
    from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    # Recurse if needed
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_4bit = False
        is_8bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2"
                    )
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
                        )
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(
    model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False
):
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced


def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )

    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )

    return model


def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    # Create a copy of the model and tie the weights, then check whether it contains tied weights
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" and ".bias" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
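# Illustrative sketch of how these helpers fit together (added; not part of
# the original module). `BitsAndBytesConfig` is the public quantization config
# object in transformers; everything else is defined above:
#
#     from transformers import AutoConfig, AutoModelForCausalLM, BitsAndBytesConfig
#
#     config = AutoConfig.from_pretrained("gpt2")
#     with init_empty_weights():
#         model = AutoModelForCausalLM.from_config(config)
#     modules_to_skip = get_keys_to_not_convert(model)  # e.g. ["lm_head"]
#     model = replace_with_bnb_linear(
#         model,
#         modules_to_not_convert=modules_to_skip,
#         quantization_config=BitsAndBytesConfig(load_in_8bit=True),
#     )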
| 137 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
    from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image
class ViTHybridModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        backbone_featmap_shape=[1, 16, 4, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape

        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [4, 8, 16, 32],
            "num_groups": 2,
        }
        return ViTHybridConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            backbone_featmap_shape=self.backbone_featmap_shape,
            backbone_config=backbone_config,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
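    # Note (added for clarity): `_config_zero_init` zeroes the config's
    # initializer ranges, so every freshly initialized weight should come out
    # exactly 0.0 (or 1.0 for norm-style scale parameters); any other value
    # points at a module that ignores the config's initializer settings.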
    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTHybridModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    def test_accelerate_inference(self):
        image_processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
        model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384", device_map="auto")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()
        self.assertEqual(model.config.id2label[predicted_class_idx], "tabby, tabby cat")
| 137 | 1 |
def method_1(boundary: list, steps: float) -> float:
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a: float, b: float, h: float):
    x = a + h
    # use a half-step tolerance so floating-point drift in x does not drop
    # the last interior point at b - h (a strict `x < b - h` would skip it)
    while x <= (b - h) + h / 2.0:
        yield x
        x = x + h


def f(x: float) -> float:  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main() -> None:
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")
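# Quick check (added for illustration): the exact integral of x**2 on [0, 1]
# is 1/3, and the composite trapezoidal rule's error is bounded by
# (b - a) * h**2 * max|f''| / 12 = 1 * 0.01 * 2 / 12, about 0.00167 for h = 0.1:
#
#     >>> abs(method_1([0.0, 1.0], 10.0) - 1 / 3) < 2e-3
#     True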
if __name__ == "__main__":
    main()
| 177 |
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]
class Graph:
    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]):
        self.vertices: set[int] = vertices
        # normalize each edge key to (smaller vertex, larger vertex)
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        subgraph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            # any real edge weighs less than this sentinel
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)

        return subgraph
def solution(filename: str = "p107_network.txt") -> int:
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    network_file: str = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}

    with open(network_file) as f:
        data = f.read().strip().split("\n")

    adjacency_matrix = [line.split(",") for line in data]
    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])

    graph: Graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph: Graph = graph.prims_algorithm()

    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())
    return initial_total - optimal_total
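# Minimal demonstration (added; not part of the Project Euler solution): on a
# weighted triangle, Prim's algorithm keeps the two cheapest edges, so the
# saving equals the weight of the dropped edge:
#
#     >>> g = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 3})
#     >>> sum(g.edges.values()) - sum(g.prims_algorithm().edges.values())
#     3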
if __name__ == "__main__":
    print(f"{solution() = }")
| 177 | 1 |
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transferred from={src_m} to={dest_m}")
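# How the transfer works (added note): `Tracker` registers a forward hook on
# every submodule and records the "leaf" modules (no children, or plain
# conv/batch-norm) in execution order. Because both networks are traced with
# the same input, zipping the two traces pairs up corresponding layers, which
# is only sound when both architectures execute equivalent leaf ops in the
# same order.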
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
    }

    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help=(
            "The name of the model you wish to convert, it must be one of the supported resnet* architectures,"
            " currently: resnet18,26,34,50,101,152. If `None`, all of them will be converted."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=Path,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        default=True,
        type=bool,
        required=False,
        help="If True, push model and image processor to the hub.",
    )

    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 183 |
"""simple docstring"""
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str) -> None:
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
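# Behaviour sketch (added for illustration):
#
#     >>> extract_path_from_uri("s3://my-bucket/dataset")
#     'my-bucket/dataset'
#     >>> extract_path_from_uri("/local/dataset")
#     '/local/dataset'
#     >>> is_remote_filesystem(fsspec.filesystem("memory"))
#     True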
| 183 | 1 |
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=16_000,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=True,
            use_timestep_embedding=False,
            time_embedding_type="fourier",
            mid_block_type="UNetMidBlock1D",
            down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
            up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        )
        scheduler = IPNDMScheduler()

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs

    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dance_diffusion(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    def test_dance_diffusion_fp16(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
| 369 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
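# Usage sketch (illustrative, not part of this module): with the _LazyModule
# installed above, a statement such as
#
#     from transformers.models.vision_encoder_decoder import VisionEncoderDecoderModel
#
# only triggers the import of modeling_vision_encoder_decoder at that point,
# keeping the top-level package import cheap when torch, TF, or flax are absent.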
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = "<<<<<<< This should probably be modified because it mentions: "
HIGHLIGHT_MESSAGE_POST = "=======\n>>>>>>>\n"

TO_HIGHLIGHT = [
    "TextEncoderConfig",
    "ByteTextEncoder",
    "SubwordTextEncoder",
    "encoder_config",
    "maybe_build_from_corpus",
    "manual_dir",
]

TO_CONVERT = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (r"tfds\.core", r"datasets"),
    (r"tf\.io\.gfile\.GFile", r"open"),
    (r"tf\.([\w\d]+)", r"datasets.Value('\1')"),
    (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
    (r"tfds\.features\.Text\(", r"datasets.Value('string'),"),
    (r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("),
    (r"tfds\.features\.FeaturesDict\(", r"dict("),
    (r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
    (r"tfds\.", r"datasets."),
    (r"dl_manager\.manual_dir", r"self.config.data_dir"),
    (r"self\.builder_config", r"self.config"),
]
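# Quick illustration (hypothetical input, for exposition only) of how one of the
# patterns above rewrites a line of TFDS code:
#
#   >>> re.sub(r"tfds\.features\.Text\(\)", r"datasets.Value('string')",
#   ...        "features = tfds.features.Text()")
#   "features = datasets.Value('string')"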
def convert_command_factory(args: Namespace):
    """Factory used to convert a TFDS dataset script to a HuggingFace Datasets script."""
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_highlight = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_highlight) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forgotten anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Euclidean distance computed with NumPy."""
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Euclidean distance computed in pure Python."""
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)
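# Quick sanity check (illustrative): both implementations agree on a 3-4-5 triangle.
#
#   >>> euclidean_distance([0, 0], [3, 4])
#   5.0
#   >>> euclidean_distance_no_np([0, 0], [3, 4])
#   5.0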
if __name__ == "__main__":

    def benchmark() -> None:
        """Benchmark the NumPy version against the pure-Python version."""
        from timeit import timeit

        print("Without Numpy")
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])", number=10_000, globals=globals(), ) )
        print("With Numpy")
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])", number=10_000, globals=globals(), ) )

    benchmark()
from typing import TYPE_CHECKING

from ...utils import _LazyModule

_import_structure = {"tokenization_bertweet": ["BertweetTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_bertweet import BertweetTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict


def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
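# Note on the slicing above: PyTorch's nn.MultiheadAttention packs q/k/v into a
# single (3 * hidden_size, hidden_size) projection matrix; with hidden_size = 256,
# rows 0:256 are the query projection, 256:512 the key, and the last 256 the value.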
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)

    return im


@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)

    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_name",
        default="conditional_detr_resnet50",
        type=str,
        help="Name of the CONDITIONAL_DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample
    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))
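        # (Sanity arithmetic: with num_train_timesteps=1000 and 5 inference steps
        # the step size is 1000 // 5 = 200, so the raw timesteps are
        # [800, 600, 400, 200, 0]; steps_offset=1 shifts each by one, giving the
        # tensor asserted above.)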
    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
    ClapTextConfig,
    ClapTextModelWithProjection,
    RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)

from diffusers import (
    AudioLDMPipeline,
    AutoencoderKL,
    DDIMScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=(32, 64), class_embed_type="simple_projection", projection_class_embeddings_input_dim=32, class_embeddings_concat=True, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=1, out_channels=1, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, projection_dim=32, )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)

        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8, sampling_rate=16000, upsample_initial_channel=16, upsample_rates=[2, 2], upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=False, )
        vocoder = SpeechT5HifiGan(vocoder_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs
    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = audioldm_pipe.tokenizer(
            prompt, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = audioldm_pipe.text_encoder(
            text_inputs,
        )
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", )
            text_inputs = text_inputs["input_ids"].to(torch_device)

            text_embeds = audioldm_pipe.text_encoder(
                text_inputs,
            )
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)

            embeds.append(text_embeds)

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = "A hammer hitting a wooden surface"

        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios

        assert audios.shape == (1, 256)

        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios

        assert audios.shape == (batch_size, 256)

        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios

        assert audios.shape == (num_waveforms_per_prompt, 256)

        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios

        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016

        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032
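        # (Sanity arithmetic: at the dummy vocoder's 16 000 Hz sampling rate,
        # 0.016 s corresponds to 256 samples and 0.032 s to 512 samples.)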
    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = ["hey"]

        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)

        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs

    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2

    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[27780:27790]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    # move the randomly chosen pivot to the end
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp

    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    # place the pivot just after the last smaller element
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
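# Rough expectation (an aside, not used by the driver below): randomized
# quicksort performs about 2 * n * ln(n) comparisons on average, i.e. roughly
# 2 * 100 * ln(100) ≈ 921 for the 100-element array sorted here.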
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted

mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution "
    "is :"
)
print(z)
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

device = torch.device("cpu")
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swiftformer_name''',
default='''swiftformer_xs''',
choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''],
type=str,
help='''Name of the SwiftFormer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''./converted_outputs/''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''')
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
from __future__ import annotations
import requests
valid_terms = set(
    """approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports""".split()
)
def get_subreddit_data(subreddit, limit=1, age="new", wanted_data=None) -> dict:
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}

    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
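# Illustrative shape of the result for wanted_data=["title", "url", "selftext"]
# (field values are whatever the Reddit API returns, shown here as placeholders):
# {0: {"title": "...", "url": "...", "selftext": "..."}, 1: {...}, ...}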
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data('''learnpython''', wanted_data=['''title''', '''url''', '''selftext''']))
import warnings

from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor


logger = logging.get_logger(__name__)


class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 362 |
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel
def shave_segments( path , n_shave_prefix_segments=1 ) -> str:
if n_shave_prefix_segments >= 0:
return ".".join(path.split('.' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('.' )[:n_shave_prefix_segments] )
def renew_resnet_paths( old_list , n_shave_prefix_segments=0 ) -> list:
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace('in_layers.0' , 'norm1' )
        new_item = new_item.replace('in_layers.2' , 'conv1' )
        new_item = new_item.replace('out_layers.0' , 'norm2' )
        new_item = new_item.replace('out_layers.3' , 'conv2' )
        new_item = new_item.replace('emb_layers.1' , 'time_emb_proj' )
        new_item = new_item.replace('skip_connection' , 'conv_shortcut' )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({'old': old_item, 'new': new_item} )
    return mapping
def renew_attention_paths( old_list , n_shave_prefix_segments=0 ) -> list:
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace('norm.weight' , 'group_norm.weight' )
        new_item = new_item.replace('norm.bias' , 'group_norm.bias' )
        new_item = new_item.replace('proj_out.weight' , 'proj_attn.weight' )
        new_item = new_item.replace('proj_out.bias' , 'proj_attn.bias' )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({'old': old_item, 'new': new_item} )
    return mapping
def assign_to_checkpoint( paths , checkpoint , old_checkpoint , attention_paths_to_split=None , additional_replacements=None , config=None ) -> None:
    assert isinstance(paths , list ), "Paths should be a list of dicts containing 'old' and 'new' keys."
    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3
            target_shape = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
            num_heads = old_tensor.shape[0] // config['num_head_channels'] // 3
            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
            query, key, value = old_tensor.split(channels // num_heads , dim=1 )
            checkpoint[path_map['query']] = query.reshape(target_shape )
            checkpoint[path_map['key']] = key.reshape(target_shape )
            checkpoint[path_map['value']] = value.reshape(target_shape )
    for path in paths:
        new_path = path['new']
        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue
        # Global renaming happens here
        new_path = new_path.replace('middle_block.0' , 'mid_block.resnets.0' )
        new_path = new_path.replace('middle_block.1' , 'mid_block.attentions.0' )
        new_path = new_path.replace('middle_block.2' , 'mid_block.resnets.1' )
        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement['old'] , replacement['new'] )
        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path['old']][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path['old']]
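# Illustrative sketch of the qkv split above, with toy sizes (hypothetical
# values, added for clarity): a fused (3 * C, C) tensor is reshaped per head,
# split into query/key/value chunks, then flattened back to (C, C) each.
_C, _heads = 8, 2
_qkv = torch.randn(3 * _C , _C )
_per_head = _qkv.reshape((_heads, 3 * _C // _heads) + _qkv.shape[1:] )
_q, _k, _v = _per_head.split(_C // _heads , dim=1 )
assert _q.reshape(-1 , _C ).shape == (_C, _C)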
def convert_ldm_checkpoint( checkpoint , config ) -> dict:
    new_checkpoint = {}
    new_checkpoint['time_embedding.linear_1.weight'] = checkpoint['time_embed.0.weight']
    new_checkpoint['time_embedding.linear_1.bias'] = checkpoint['time_embed.0.bias']
    new_checkpoint['time_embedding.linear_2.weight'] = checkpoint['time_embed.2.weight']
    new_checkpoint['time_embedding.linear_2.bias'] = checkpoint['time_embed.2.bias']
    new_checkpoint['conv_in.weight'] = checkpoint['input_blocks.0.0.weight']
    new_checkpoint['conv_in.bias'] = checkpoint['input_blocks.0.0.bias']
    new_checkpoint['conv_norm_out.weight'] = checkpoint['out.0.weight']
    new_checkpoint['conv_norm_out.bias'] = checkpoint['out.0.bias']
    new_checkpoint['conv_out.weight'] = checkpoint['out.2.weight']
    new_checkpoint['conv_out.bias'] = checkpoint['out.2.bias']
    # Retrieves the keys for the input blocks only
    num_input_blocks = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} )
    input_blocks = {
        layer_id: [key for key in checkpoint if f'input_blocks.{layer_id}' in key]
        for layer_id in range(num_input_blocks )
    }
    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} )
    middle_blocks = {
        layer_id: [key for key in checkpoint if f'middle_block.{layer_id}' in key]
        for layer_id in range(num_middle_blocks )
    }
    # Retrieves the keys for the output blocks only
    num_output_blocks = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'output_blocks' in layer} )
    output_blocks = {
        layer_id: [key for key in checkpoint if f'output_blocks.{layer_id}' in key]
        for layer_id in range(num_output_blocks )
    }
    for i in range(1 , num_input_blocks ):
        block_id = (i - 1) // (config['num_res_blocks'] + 1)
        layer_in_block_id = (i - 1) % (config['num_res_blocks'] + 1)
        resnets = [key for key in input_blocks[i] if f'input_blocks.{i}.0' in key]
        attentions = [key for key in input_blocks[i] if f'input_blocks.{i}.1' in key]
        if f'input_blocks.{i}.0.op.weight' in checkpoint:
            new_checkpoint[f'down_blocks.{block_id}.downsamplers.0.conv.weight'] = checkpoint[
                f'input_blocks.{i}.0.op.weight'
            ]
            new_checkpoint[f'down_blocks.{block_id}.downsamplers.0.conv.bias'] = checkpoint[
                f'input_blocks.{i}.0.op.bias'
            ]
            continue
        paths = renew_resnet_paths(resnets )
        meta_path = {'old': f'input_blocks.{i}.0', 'new': f'down_blocks.{block_id}.resnets.{layer_in_block_id}'}
        resnet_op = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'}
        assign_to_checkpoint(
            paths , new_checkpoint , checkpoint , additional_replacements=[meta_path, resnet_op] , config=config )
        if len(attentions ):
            paths = renew_attention_paths(attentions )
            meta_path = {
                'old': f'input_blocks.{i}.1',
                'new': f'down_blocks.{block_id}.attentions.{layer_in_block_id}',
            }
            to_split = {
                f'input_blocks.{i}.1.qkv.bias': {
                    'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
                    'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
                    'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
                },
                f'input_blocks.{i}.1.qkv.weight': {
                    'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
                    'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
                    'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
                },
            }
            assign_to_checkpoint(
                paths , new_checkpoint , checkpoint , additional_replacements=[meta_path] , attention_paths_to_split=to_split , config=config , )
    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]
    resnet_0_paths = renew_resnet_paths(resnet_0 )
    assign_to_checkpoint(resnet_0_paths , new_checkpoint , checkpoint , config=config )
    resnet_1_paths = renew_resnet_paths(resnet_1 )
    assign_to_checkpoint(resnet_1_paths , new_checkpoint , checkpoint , config=config )
    attentions_paths = renew_attention_paths(attentions )
    to_split = {
        'middle_block.1.qkv.bias': {
            'key': 'mid_block.attentions.0.key.bias',
            'query': 'mid_block.attentions.0.query.bias',
            'value': 'mid_block.attentions.0.value.bias',
        },
        'middle_block.1.qkv.weight': {
            'key': 'mid_block.attentions.0.key.weight',
            'query': 'mid_block.attentions.0.query.weight',
            'value': 'mid_block.attentions.0.value.weight',
        },
    }
    assign_to_checkpoint(
        attentions_paths , new_checkpoint , checkpoint , attention_paths_to_split=to_split , config=config )
    for i in range(num_output_blocks ):
        block_id = i // (config['num_res_blocks'] + 1)
        layer_in_block_id = i % (config['num_res_blocks'] + 1)
        output_block_layers = [shave_segments(name , 2 ) for name in output_blocks[i]]
        output_block_list = {}
        for layer in output_block_layers:
            layer_id, layer_name = layer.split('.' )[0], shave_segments(layer , 1 )
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name )
            else:
                output_block_list[layer_id] = [layer_name]
        if len(output_block_list ) > 1:
            resnets = [key for key in output_blocks[i] if f'output_blocks.{i}.0' in key]
            attentions = [key for key in output_blocks[i] if f'output_blocks.{i}.1' in key]
            resnet_0_paths = renew_resnet_paths(resnets )
            paths = renew_resnet_paths(resnets )
            meta_path = {'old': f'output_blocks.{i}.0', 'new': f'up_blocks.{block_id}.resnets.{layer_in_block_id}'}
            assign_to_checkpoint(paths , new_checkpoint , checkpoint , additional_replacements=[meta_path] , config=config )
            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] )
                new_checkpoint[f'up_blocks.{block_id}.upsamplers.0.conv.weight'] = checkpoint[
                    f'output_blocks.{i}.{index}.conv.weight'
                ]
                new_checkpoint[f'up_blocks.{block_id}.upsamplers.0.conv.bias'] = checkpoint[
                    f'output_blocks.{i}.{index}.conv.bias'
                ]
                # Clear attentions as they have been attributed above.
                if len(attentions ) == 2:
                    attentions = []
            if len(attentions ):
                paths = renew_attention_paths(attentions )
                meta_path = {
                    'old': f'output_blocks.{i}.1',
                    'new': f'up_blocks.{block_id}.attentions.{layer_in_block_id}',
                }
                to_split = {
                    f'output_blocks.{i}.1.qkv.bias': {
                        'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
                        'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
                        'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
                    },
                    f'output_blocks.{i}.1.qkv.weight': {
                        'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
                        'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
                        'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
                    },
                }
                assign_to_checkpoint(
                    paths , new_checkpoint , checkpoint , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=config , )
        else:
            resnet_0_paths = renew_resnet_paths(output_block_layers , n_shave_prefix_segments=1 )
            for path in resnet_0_paths:
                old_path = '.'.join(['output_blocks', str(i ), path['old']] )
                new_path = '.'.join(['up_blocks', str(block_id ), 'resnets', str(layer_in_block_id ), path['new']] )
                new_checkpoint[new_path] = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    args = parser.parse_args()
    checkpoint = torch.load(args.checkpoint_path)
    with open(args.config_file) as f:
        config = json.loads(f.read())
    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)
    if "ldm" in config:
        del config["ldm"]
    model = UNet2DModel(**config)
    model.load_state_dict(converted_checkpoint)
    try:
        scheduler = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
        vqvae = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 312 | 0 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@add_end_docstrings(
_UpperCAmelCase , R"""
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
""" , )
class FillMaskPipeline( Pipeline ):
    def get_masked_index( self , input_ids : GenericTensor ):
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=False )
        else:
            raise ValueError('Unsupported framework' )
        return masked_index
    def _ensure_exactly_one_mask_token( self , input_ids : GenericTensor ):
        masked_index = self.get_masked_index(input_ids )
        numel = np.prod(masked_index.shape )
        if numel < 1:
            raise PipelineException(
                'fill-mask' , self.model.base_model_prefix , F'No mask_token ({self.tokenizer.mask_token}) found on the input' , )
    def ensure_exactly_one_mask_token( self , model_inputs : GenericTensor ):
        if isinstance(model_inputs , list ):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input['input_ids'][0] )
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids )
    def preprocess( self , inputs , return_tensors=None , **preprocess_parameters ):
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs , return_tensors=return_tensors )
        self.ensure_exactly_one_mask_token(model_inputs )
        return model_inputs
    def _forward( self , model_inputs : Dict ):
        model_outputs = self.model(**model_inputs )
        model_outputs['input_ids'] = model_inputs['input_ids']
        return model_outputs
    def postprocess( self , model_outputs , top_k=5 , target_ids=None ):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs['input_ids'][0]
        outputs = model_outputs['logits']
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
            outputs = outputs.numpy()
            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits , axis=-1 )
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs , 0 ) , target_ids.reshape(-1 , 1 ) )
                probs = tf.expand_dims(probs , 0 )
            topk = tf.math.top_k(probs , k=top_k )
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=False ).squeeze(-1 )
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1 )
            if target_ids is not None:
                probs = probs[..., target_ids]
            values, predictions = probs.topk(top_k )
        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
            row = []
            for v, p in zip(_values , _predictions ):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()
                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens , skip_special_tokens=single_mask )
                proposition = {'score': v, 'token': p, 'token_str': self.tokenizer.decode([p] ), 'sequence': sequence}
                row.append(proposition )
            result.append(row )
        if single_mask:
            return result[0]
        return result
    def get_target_ids( self , targets , top_k=None ):
        if isinstance(targets , str ):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target , None )
            if id_ is None:
                input_ids = self.tokenizer(
                    target , add_special_tokens=False , return_attention_mask=False , return_token_type_ids=False , max_length=1 , truncation=True , )['input_ids']
                if len(input_ids ) == 0:
                    logger.warning(
                        F'The specified target token `{target}` does not exist in the model vocabulary. '
                        'We cannot replace it with anything meaningful, ignoring it' )
                    continue
                id_ = input_ids[0]
                # XXX: if users hit this code path, tokenization becomes pretty slow,
                # so this warning lets them fix their input to get faster performance.
                logger.warning(
                    F'The specified target token `{target}` does not exist in the model vocabulary. '
                    F'Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.' )
            target_ids.append(id_ )
        target_ids = list(set(target_ids ) )
        if len(target_ids ) == 0:
            raise ValueError('At least one target must be provided when passed.' )
        target_ids = np.array(target_ids )
        return target_ids
    def _sanitize_parameters( self , top_k=None , targets=None ):
        postprocess_params = {}
        if targets is not None:
            target_ids = self.get_target_ids(targets , top_k )
            postprocess_params['target_ids'] = target_ids
        if top_k is not None:
            postprocess_params['top_k'] = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'fill-mask' , self.model.base_model_prefix , 'The tokenizer does not define a `mask_token`.' )
return {}, {}, postprocess_params
    def __call__( self , inputs , *args , **kwargs ):
        outputs = super().__call__(inputs , **kwargs )
        if isinstance(inputs , list ) and len(outputs ) == 1:
            return outputs[0]
        return outputs
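# A minimal usage sketch for this pipeline (illustrative; the model name is an
# assumption, any fill-mask checkpoint with a [MASK] token would work):
#
#     from transformers import pipeline
#     unmasker = pipeline("fill-mask", model="bert-base-uncased")
#     unmasker("Paris is the [MASK] of France.", top_k=2)
#     # -> [{"score": ..., "token": ..., "token_str": ..., "sequence": ...}, ...]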
| 176 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
__snake_case = NewType("""DataClass""", Any)
__snake_case = NewType("""DataClassType""", Any)
def string_to_bool ( v ) -> bool:
    '''simple docstring'''
    if isinstance(v , bool ):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            F'Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).' )
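# Illustrative checks (added example): booleans pass through unchanged and
# common truthy/falsy strings are parsed case-insensitively.
assert string_to_bool(True ) is True
assert string_to_bool('YES' ) is True
assert string_to_bool('0' ) is False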
def make_choice_type_function ( choices ) -> Callable[[str], Any]:
    '''simple docstring'''
    str_to_choice = {str(choice ): choice for choice in choices}
    return lambda arg : str_to_choice.get(arg , arg )
def HfArg ( *,
    aliases = None , help = None , default = dataclasses.MISSING , default_factory = dataclasses.MISSING , metadata = None , **kwargs , ) -> dataclasses.Field:
    '''simple docstring'''
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata['aliases'] = aliases
    if help is not None:
        metadata['help'] = help
    return dataclasses.field(metadata=metadata , default=default , default_factory=default_factory , **kwargs )
class HfArgumentParser( ArgumentParser ):
    dataclass_types: Iterable[DataClassType]
    def __init__( self , dataclass_types : Union[DataClassType, Iterable[DataClassType]] , **kwargs ):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs['formatter_class'] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs )
        if dataclasses.is_dataclass(dataclass_types ):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types )
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype )
    @staticmethod
    def _parse_dataclass_field( parser : ArgumentParser , field : dataclasses.Field ):
        field_name = F'--{field.name}'
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type , str ):
            raise RuntimeError(
                'Unresolved type detected, which should have been done with the help of '
                '`typing.get_type_hints` method by default' )
        aliases = kwargs.pop('aliases' , [] )
        if isinstance(aliases , str ):
            aliases = [aliases]
        origin_type = getattr(field.type , '__origin__' , field.type )
        if origin_type is Union or (hasattr(types , 'UnionType' ) and isinstance(field.type , types.UnionType )):
            if str not in field.type.__args__ and (
                len(field.type.__args__ ) != 2 or type(None ) not in field.type.__args__
            ):
                raise ValueError(
                    'Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'
                    ' the argument parser only supports one type per argument.'
                    F' Problem encountered in field \'{field.name}\'.' )
            if type(None ) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type , '__origin__' , field.type )
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None , field.type.__args__[1] ) else field.type.__args__[1]
                )
                origin_type = getattr(field.type , '__origin__' , field.type )
        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type , type ) and issubclass(field.type , Enum )):
            if origin_type is Literal:
                kwargs['choices'] = field.type.__args__
            else:
                kwargs['choices'] = [x.value for x in field.type]
            kwargs['type'] = make_choice_type_function(kwargs['choices'] )
            if field.default is not dataclasses.MISSING:
                kwargs['default'] = field.default
            else:
                kwargs['required'] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the correct kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs )
            # Hack because type=bool in argparse does not behave as we want.
            kwargs['type'] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # The default value is False when a bool field has no explicit default.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs['default'] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs['nargs'] = '?'
                # This is the value that will get picked if we do --field_name (without value)
                kwargs['const'] = True
        elif isclass(origin_type ) and issubclass(origin_type , list ):
            kwargs['type'] = field.type.__args__[0]
            kwargs['nargs'] = '+'
            if field.default_factory is not dataclasses.MISSING:
                kwargs['default'] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs['required'] = True
        else:
            kwargs['type'] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs['default'] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs['default'] = field.default_factory()
            else:
                kwargs['required'] = True
        parser.add_argument(field_name , *aliases , **kwargs )
        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs['default'] = False
            parser.add_argument(F'--no_{field.name}' , action='store_false' , dest=field.name , **bool_kwargs )
    def _add_dataclass_arguments( self , dtype : DataClassType ):
        if hasattr(dtype , '_argument_group_name' ):
            parser = self.add_argument_group(dtype._argument_group_name )
        else:
            parser = self
        try:
            type_hints = get_type_hints(dtype )
except NameError:
raise RuntimeError(
F'Type resolution failed for {dtype}. Try declaring the class in global scope or '
'removing line of `from __future__ import annotations` which opts in Postponed '
'Evaluation of Annotations (PEP 563)' )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex ):
                python_version = '.'.join(map(str , sys.version_info[:3] ) )
                raise RuntimeError(
                    F'Type resolution failed for {dtype} on Python {python_version}. Try removing '
                    'line of `from __future__ import annotations` which opts in union types as '
                    '`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '
                    'support Python versions lower than 3.10, you need to use '
                    '`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '
                    '`X | None`.' ) from ex
            raise
        for field in dataclasses.fields(dtype ):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser , field )
    def parse_args_into_dataclasses( self , args=None , return_remaining_strings=False , look_for_args_file=True , args_filename=None , args_file_flag=None , ):
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
            args_files = []
            if args_filename:
                args_files.append(Path(args_filename ) )
            elif look_for_args_file and len(sys.argv ):
                args_files.append(Path(sys.argv[0] ).with_suffix('.args' ) )
            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag , type=str , action='append' )
                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args )
                cmd_args_file_paths = vars(cfg ).get(args_file_flag.lstrip('-' ) , None )
                if cmd_args_file_paths:
                    args_files.extend([Path(p ) for p in cmd_args_file_paths] )
            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()
            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace, remaining_args = self.parse_known_args(args=args )
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
            inputs = {k: v for k, v in vars(namespace ).items() if k in keys}
            for k in keys:
                delattr(namespace , k )
            obj = dtype(**inputs )
            outputs.append(obj )
        if len(namespace.__dict__ ) > 0:
            # additional namespace.
            outputs.append(namespace )
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(F'Some specified arguments are not used by the HfArgumentParser: {remaining_args}' )
            return (*outputs,)
    def parse_dict( self , args : Dict[str, Any] , allow_extra_keys : bool = False ):
        unused_keys = set(args.keys() )
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys() )
            obj = dtype(**inputs )
            outputs.append(obj )
        if not allow_extra_keys and unused_keys:
            raise ValueError(F'Some keys are not used by the HfArgumentParser: {sorted(unused_keys )}' )
        return tuple(outputs )
    def parse_json_file( self , json_file : str , allow_extra_keys : bool = False ):
        with open(Path(json_file ) , encoding='utf-8' ) as open_json_file:
            data = json.loads(open_json_file.read() )
        outputs = self.parse_dict(data , allow_extra_keys=allow_extra_keys )
        return tuple(outputs )
    def parse_yaml_file( self , yaml_file : str , allow_extra_keys : bool = False ):
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file ).read_text() ) , allow_extra_keys=allow_extra_keys )
        return tuple(outputs )
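# A minimal usage sketch (hypothetical dataclass, added to illustrate the
# parser defined above):
#
#     @dataclasses.dataclass
#     class DemoArguments:
#         model_name: str = HfArg(default="demo", help="Name of the model.")
#         fp16: bool = False
#
#     parser = HfArgumentParser(DemoArguments)
#     (demo_args,) = parser.parse_args_into_dataclasses(args=["--model_name", "x", "--fp16", "true"])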
| 176 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"caidas/swin2sr-classicalsr-x2-64": (
"https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
),
}
class Swin2SRConfig( PretrainedConfig ):
    model_type = 'swin2sr'
    attribute_map = {
        'hidden_size': 'embed_dim',
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }
    def __init__( self , image_size=64 , patch_size=1 , num_channels=3 , embed_dim=180 , depths=[6, 6, 6, 6, 6, 6] , num_heads=[6, 6, 6, 6, 6, 6] , window_size=8 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1e-5 , upscale=2 , img_range=1.0 , resi_connection="1conv" , upsampler="pixelshuffle" , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
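# Illustrative usage (added example): default values and the attribute_map
# indirection defined above.
#
#     config = Swin2SRConfig()
#     assert config.embed_dim == 180
#     assert config.hidden_size == 180  # resolved through attribute_map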
| 114 |
'''simple docstring'''
import os
def solution() -> int:
    """simple docstring"""
    script_directory = os.path.dirname(os.path.realpath(__file__ ) )
    triangle_path = os.path.join(script_directory , 'triangle.txt' )
    with open(triangle_path ) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(' ' ):
            numbers_from_line.append(int(number ) )
        a.append(numbers_from_line )
    for i in range(1 , len(a ) ):
        for j in range(len(a[i] ) ):
            number_a = a[i - 1][j] if j != len(a[i - 1] ) else 0
            number_b = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number_a , number_b )
    return max(a[-1] )
if __name__ == "__main__":
print(solution())
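# Worked example of the bottom-up accumulation above on a tiny triangle (added
# for clarity; values are illustrative): each row absorbs its best parent, so
# [[3], [7, 4], [2, 4, 6]] becomes [[3], [10, 7], [12, 14, 13]] and the answer
# is 14 (the path 3 -> 7 -> 4).
_demo = [[3], [7, 4], [2, 4, 6]]
for _i in range(1 , len(_demo ) ):
    for _j in range(len(_demo[_i] ) ):
        _up = _demo[_i - 1][_j] if _j != len(_demo[_i - 1] ) else 0
        _up_left = _demo[_i - 1][_j - 1] if _j > 0 else 0
        _demo[_i][_j] += max(_up , _up_left )
assert max(_demo[-1] ) == 14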
| 114 | 1 |
"""simple docstring"""
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines( lines: List[str] ) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r'#.*' , '' , line )  # remove comments
        if line:
            filtered_lines.append(line )
    full_str = '\n'.join(filtered_lines )
    # Make a hash from all this code
    full_bytes = full_str.encode('utf-8' )
    return sha256(full_bytes ).hexdigest()
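# Illustrative check (added example): pure-comment lines are stripped before
# hashing, so these two inputs hash identically.
assert _hash_python_lines(['x = 1', '# only a comment'] ) == _hash_python_lines(['x = 1'] )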
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
'''csv''': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'''json''': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'''pandas''': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'''parquet''': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'''arrow''': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'''text''': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'''imagefolder''': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'''audiofolder''': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
'''.csv''': ('''csv''', {}),
'''.tsv''': ('''csv''', {'''sep''': '''\t'''}),
'''.json''': ('''json''', {}),
'''.jsonl''': ('''json''', {}),
'''.parquet''': ('''parquet''', {}),
'''.arrow''': ('''arrow''', {}),
'''.txt''': ('''text''', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {'''imagefolder''', '''audiofolder'''}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('''.zip''')
_MODULE_TO_EXTENSIONS["audiofolder"].append('''.zip''') | 74 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True )
class AudioClassification(TaskTemplate ):
    task: str = field(default='''audio-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
    input_schema: ClassVar[Features] = Features({'''audio''': Audio()} )
    label_schema: ClassVar[Features] = Features({'''labels''': ClassLabel} )
    audio_column: str = "audio"
    label_column: str = "labels"
    def align_with_features(self , features ) -> "AudioClassification":
        if self.label_column not in features:
            raise ValueError(F'Column {self.label_column} is not present in features.' )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(F'Column {self.label_column} is not a ClassLabel.' )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema['''labels'''] = features[self.label_column]
        task_template.__dict__['''label_schema'''] = label_schema
        return task_template
@property
    def column_mapping( self ) -> Dict[str, str]:
return {
self.audio_column: "audio",
self.label_column: "labels",
}
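# A minimal usage sketch (hypothetical label names, added to illustrate the
# template above):
#
#     features = Features({"audio": Audio(), "labels": ClassLabel(names=["cat", "dog"])})
#     task = AudioClassification().align_with_features(features)
#     task.column_mapping  # {"audio": "audio", "labels": "labels"}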
| 240 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys( config , has_lm_head=False , is_semantic=False ) -> list:
    '''simple docstring'''
    prefix = """backbone.""" if is_semantic else """"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""{prefix}blocks.{i}.norm1.weight""", f"""beit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm1.bias""", f"""beit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""{prefix}blocks.{i}.attn.proj.weight""", f"""beit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""{prefix}blocks.{i}.attn.proj.bias""", f"""beit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm2.weight""", f"""beit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm2.bias""", f"""beit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.weight""", f"""beit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.bias""", f"""beit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.weight""", f"""beit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.bias""", f"""beit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
(f"""{prefix}cls_token""", """beit.embeddings.cls_token"""),
(f"""{prefix}patch_embed.proj.weight""", """beit.embeddings.patch_embeddings.projection.weight"""),
(f"""{prefix}patch_embed.proj.bias""", """beit.embeddings.patch_embeddings.projection.bias"""),
(f"""{prefix}pos_embed""", """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v( state_dict , config , has_lm_head=False , is_semantic=False ) -> None:
    '''simple docstring'''
    for i in range(config.num_hidden_layers ):
        prefix = """backbone.""" if is_semantic else """"""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"""{prefix}blocks.{i}.attn.qkv.weight""" )
        q_bias = state_dict.pop(f"""{prefix}blocks.{i}.attn.q_bias""" )
        v_bias = state_dict.pop(f"""{prefix}blocks.{i}.attn.v_bias""" )
        state_dict[f"""beit.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"""beit.encoder.layer.{i}.attention.attention.query.bias"""] = q_bias
        state_dict[f"""beit.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""beit.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""beit.encoder.layer.{i}.attention.attention.value.bias"""] = v_bias
        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"""{prefix}blocks.{i}.gamma_1""" )
        gamma_2 = state_dict.pop(f"""{prefix}blocks.{i}.gamma_2""" )
        state_dict[f"""beit.encoder.layer.{i}.lambda_1"""] = gamma_1
        state_dict[f"""beit.encoder.layer.{i}.lambda_2"""] = gamma_2
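# Illustrative check with toy sizes (added for clarity): the fused qkv weight
# stacks query, key and value along dim 0, so the three row slices above tile
# the original tensor exactly.
_h = 4
_fused = torch.arange(3 * _h * _h ).reshape(3 * _h , _h )
assert torch.equal(torch.cat([_fused[:_h, :], _fused[_h : 2 * _h, :], _fused[-_h:, :]] ) , _fused )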
def rename_key( dct , old , new ) -> None:
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def prepare_img( ):
    '''simple docstring'''
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_dit_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub=False ) -> None:
    '''simple docstring'''
    has_lm_head = False if """rvlcdip""" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True , use_mask_token=has_lm_head )
    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = """huggingface/label-files"""
        filename = """rvlcdip-id2label.json"""
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="""cpu""" )["""model"""]
    rename_keys = create_rename_keys(config , has_lm_head=has_lm_head )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , has_lm_head=has_lm_head )
    # load HuggingFace model
    model = BeitForMaskedImageModeling(config ) if has_lm_head else BeitForImageClassification(config )
    model.eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=False )
    image = prepare_img()
    encoding = image_processor(images=image , return_tensors="""pt""" )
    pixel_values = encoding["""pixel_values"""]
    outputs = model(pixel_values )
    logits = outputs.logits
    # verify logits
    expected_shape = [1, 16] if """rvlcdip""" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape ), "Shape of logits not as expected"
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        if has_lm_head:
            model_name = """dit-base""" if """base""" in checkpoint_url else """dit-large"""
        else:
            model_name = """dit-base-finetuned-rvlcdip""" if """dit-b""" in checkpoint_url else """dit-large-finetuned-rvlcdip"""
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=True , )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=True , )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
    args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 368 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)
# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"""
),
}
class RetriBertConfig( PretrainedConfig ):
    model_type = """retribert"""
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=8 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , share_encoders=True , projection_dim=128 , pad_token_id=0 , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
| 165 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
"MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MegaForCausalLM",
"MegaForMaskedLM",
"MegaForMultipleChoice",
"MegaForQuestionAnswering",
"MegaForSequenceClassification",
"MegaForTokenClassification",
"MegaModel",
"MegaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 177 |
"""simple docstring"""
from jiwer import compute_measures
import datasets
__A = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
__A = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n"
__A = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class WER(datasets.Metric ):
"""simple docstring"""
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
] , )
    def _compute( self , predictions=None , references=None , concatenate_texts=False ):
        if concatenate_texts:
            return compute_measures(references , predictions )["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions , references ):
                measures = compute_measures(reference , prediction )
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
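# Worked example (added for clarity, matching the docstring example above):
# "this is the prediction" vs "this is the reference" gives S=1, D=0, I=0 with
# 3 hits, and "there is an other sample" vs "there is another one" gives S=2,
# D=0, I=1 with 2 hits, so the pooled WER is (1 + 3) / (4 + 4) = 0.5.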
| 177 | 1 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders( accelerator ,batch_size = 16 ):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased" )
    datasets = load_dataset("glue" ,"mrpc" )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"] ,examples["sentence2"] ,truncation=True ,max_length=None )
        return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function ,batched=True ,remove_columns=["idx", "sentence1", "sentence2"] ,)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label" ,"labels" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples ,padding="longest" ,max_length=max_length ,pad_to_multiple_of=pad_to_multiple_of ,return_tensors="pt" ,)
# Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"] ,shuffle=True ,collate_fn=collate_fn ,batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"] ,shuffle=False ,collate_fn=collate_fn ,batch_size=EVAL_BATCH_SIZE )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function( config ,args ):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS" ,None ) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase_ : Any = config["lr"]
UpperCAmelCase_ : Any = int(config["num_epochs"] )
UpperCAmelCase_ : Any = int(config["seed"] )
UpperCAmelCase_ : Union[str, Any] = int(config["batch_size"] )
UpperCAmelCase_ : Tuple = evaluate.load("glue" ,"mrpc" )
# If the batch size is too big we use gradient accumulation
UpperCAmelCase_ : Union[str, Any] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
UpperCAmelCase_ : List[Any] = batch_size // MAX_GPU_BATCH_SIZE
UpperCAmelCase_ : Optional[int] = MAX_GPU_BATCH_SIZE
set_seed(__snake_case )
UpperCAmelCase_ : str = get_dataloaders(__snake_case ,__snake_case )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" ,return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() ,lr=lr )
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer ,num_warmup_steps=1_00 ,num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps ,)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model ,optimizer ,train_dataloader ,eval_dataloader ,lr_scheduler )
# Now we train the model
for epoch in range(__snake_case ):
model.train()
for step, batch in enumerate(__snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
UpperCAmelCase_ : List[Any] = model(**__snake_case )
UpperCAmelCase_ : Dict = outputs.loss
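      # Scale the loss so the gradients accumulated over several micro-batches average to one optimizer step.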
UpperCAmelCase_ : str = loss / gradient_accumulation_steps
accelerator.backward(__snake_case )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
    samples_seen = 0
for step, batch in enumerate(__snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model(**__snake_case )
UpperCAmelCase_ : str = outputs.logits.argmax(dim=-1 )
UpperCAmelCase_ : Union[str, Any] = accelerator.gather((predictions, batch["labels"]) )
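      # gather concatenates the per-process tensors, so every process sees the predictions from all devices.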
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(__snake_case ) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
UpperCAmelCase_ : List[str] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
UpperCAmelCase_ : Any = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=__snake_case ,references=__snake_case ,)
UpperCAmelCase_ : Tuple = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" ,__snake_case )
def snake_case ( ):
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser(description="Simple example of training script." )
  parser.add_argument(
    "--mixed_precision" ,type=__snake_case ,default=__snake_case ,choices=["no", "fp16", "bf16", "fp8"] ,help="Whether to use mixed precision. Choose "
    "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
    "and an Nvidia Ampere GPU." ,)
parser.add_argument("--cpu" ,action="store_true" ,help="If passed, will train on the CPU." )
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
UpperCAmelCase_ : Tuple = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(__snake_case ,__snake_case )
if __name__ == "__main__":
main()
| 364 |
"""simple docstring"""
import math
import sys
import cv2
import numpy as np
def vec_gaussian ( img ,variance ):
    # For applying gaussian function for each element in matrix.
    sigma = math.sqrt(variance )
    cons = 1 / (sigma * math.sqrt(2 * math.pi ))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def get_slice ( img ,x ,y ,kernel_size ):
    # Extract the square (kernel_size x kernel_size) window centred at (x, y).
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]
def get_gauss_kernel ( kernel_size ,spatial_variance ):
    # Creates a gaussian kernel of given dimension.
    arr = np.zeros((kernel_size, kernel_size) )
    for i in range(0 ,kernel_size ):
        for j in range(0 ,kernel_size ):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
    return vec_gaussian(arr ,spatial_variance )
def bilateral_filter ( img ,spatial_variance ,intensity_variance ,kernel_size ,):
    # Each output pixel is a Gaussian-weighted average whose weights combine spatial
    # closeness with similarity in intensity, so edges are preserved.
    imga = np.zeros(img.shape )
    gauss_ker = get_gauss_kernel(kernel_size ,spatial_variance )
    size_x , size_y = img.shape
    for i in range(kernel_size // 2 ,size_x - kernel_size // 2 ):
        for j in range(kernel_size // 2 ,size_y - kernel_size // 2 ):
            img_s = get_slice(img ,i ,j ,kernel_size )
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i ,intensity_variance )
            weights = np.multiply(gauss_ker ,img_ig )
            vals = np.multiply(img_s ,weights )
            val = np.sum(vals ) / np.sum(weights )
            imga[i, j] = val
    return imga
def parse_args ( args ):
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2] ) if args[2:] else 1.0
    intensity_variance = float(args[3] ) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4] )
        # Force an odd kernel size so the window has a well-defined centre pixel.
        kernel_size = kernel_size + abs(kernel_size % 2 - 1 )
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
    filename , spatial_variance , intensity_variance , kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow('''input image''', img)
    out = img / 255
    out = out.astype('''float32''')
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow('''output image''', out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
| 253 | 0 |
def reverse_long_words(sentence):
    # Reverse every word longer than four characters; shorter words pass through unchanged.
    return " ".join(
        ''.join(word[::-1]) if len(word) > 4 else word for word in sentence.split())
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("Hey wollef sroirraw"))
| 178 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 178 | 1 |
'''simple docstring'''
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class UpperCAmelCase ( unittest.TestCase):
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = mock.Mock()
UpperCamelCase__ = 500
UpperCamelCase__ = {}
UpperCamelCase__ = HTTPError
UpperCamelCase__ = {}
# Download this model to make sure it's in the cache.
UpperCamelCase__ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request", return_value=a_ ) as mock_head:
UpperCamelCase__ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def lowercase_ ( self : int ):
"""simple docstring"""
UpperCamelCase__ = mock.Mock()
UpperCamelCase__ = 500
UpperCamelCase__ = {}
UpperCamelCase__ = HTTPError
UpperCamelCase__ = {}
# Download this model to make sure it's in the cache.
UpperCamelCase__ = GPTaTokenizerFast.from_pretrained("gpt2" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request", return_value=a_ ) as mock_head:
UpperCamelCase__ = GPTaTokenizerFast.from_pretrained("gpt2" )
# This check we did call the fake head request
mock_head.assert_called()
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
try:
UpperCamelCase__ = tempfile.mktemp()
with open(a_, "wb" ) as f:
http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", a_ )
UpperCamelCase__ = AlbertTokenizer.from_pretrained(a_ )
finally:
os.remove(a_ )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("tokenizer.json" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open("tokenizer.json", "wb" ) as f:
http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", a_ )
UpperCamelCase__ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size, 1000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("tokenizer.json" )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" )
@is_staging_test
class UpperCAmelCase ( unittest.TestCase):
_lowerCamelCase : Dict = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']
@classmethod
def lowercase_ ( cls : Any ):
"""simple docstring"""
UpperCamelCase__ = TOKEN
HfFolder.save_token(a_ )
@classmethod
def lowercase_ ( cls : List[Any] ):
"""simple docstring"""
try:
delete_repo(token=cls._token, repo_id="test-tokenizer" )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id="valid_org/test-tokenizer-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id="test-dynamic-tokenizer" )
except HTTPError:
pass
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase__ = os.path.join(a_, "vocab.txt" )
with open(a_, "w", encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
UpperCamelCase__ = BertTokenizer(a_ )
tokenizer.push_to_hub("test-tokenizer", use_auth_token=self._token )
UpperCamelCase__ = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab )
# Reset repo
delete_repo(token=self._token, repo_id="test-tokenizer" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(a_, repo_id="test-tokenizer", push_to_hub=a_, use_auth_token=self._token )
UpperCamelCase__ = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase__ = os.path.join(a_, "vocab.txt" )
with open(a_, "w", encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
UpperCamelCase__ = BertTokenizer(a_ )
tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token )
UpperCamelCase__ = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab )
# Reset repo
delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
a_, repo_id="valid_org/test-tokenizer-org", push_to_hub=a_, use_auth_token=self._token )
UpperCamelCase__ = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab )
@require_tokenizers
def lowercase_ ( self : Tuple ):
"""simple docstring"""
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase__ = os.path.join(a_, "vocab.txt" )
with open(a_, "w", encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
UpperCamelCase__ = CustomTokenizer(a_ )
# No fast custom tokenizer
tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token )
UpperCamelCase__ = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer', trust_remote_code=a_ )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer" )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase__ = os.path.join(a_, "vocab.txt" )
with open(a_, "w", encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
UpperCamelCase__ = BertTokenizerFast.from_pretrained(a_ )
bert_tokenizer.save_pretrained(a_ )
UpperCamelCase__ = CustomTokenizerFast.from_pretrained(a_ )
tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token )
UpperCamelCase__ = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer', trust_remote_code=a_ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast" )
UpperCamelCase__ = AutoTokenizer.from_pretrained(
f'{USER}/test-dynamic-tokenizer', use_fast=a_, trust_remote_code=a_ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer" )
class UpperCAmelCase ( unittest.TestCase):
def lowercase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase__ = Trie()
trie.add("Hello 友達" )
self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}} )
trie.add("Hello" )
self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}} )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = Trie()
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ), ["[CLS] This is a extra_id_100"] )
trie.add("[CLS]" )
trie.add("extra_id_1" )
trie.add("extra_id_100" )
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ), ["[CLS]", " This is a ", "extra_id_100"] )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ = Trie()
trie.add("A" )
self.assertEqual(trie.split("ABC" ), ["A", "BC"] )
self.assertEqual(trie.split("BCA" ), ["BC", "A"] )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = Trie()
trie.add("TOKEN]" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ), ["This is something ", "[SPECIAL_TOKEN]"] )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ = Trie()
trie.add("A" )
trie.add("P" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ), ["This is something ", "[SPECIAL_TOKEN]"] )
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = Trie()
trie.add("AB" )
trie.add("B" )
trie.add("C" )
self.assertEqual(trie.split("ABC" ), ["AB", "C"] )
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = Trie()
trie.add("ABC" )
trie.add("B" )
trie.add("CD" )
self.assertEqual(trie.split("ABCD" ), ["ABC", "D"] )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ = Trie()
UpperCamelCase__ = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3] )
self.assertEqual(a_, ["AB", "C"] ) | 31 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( density : float , bulk_modulus : float ) -> float:
'''simple docstring'''
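    # Newton-Laplace equation: speed of sound c = sqrt(bulk_modulus / density).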
if density <= 0:
raise ValueError("Impossible fluid density" )
if bulk_modulus <= 0:
raise ValueError("Impossible bulk modulus" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod() | 31 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
"LlamaForCausalLM",
"LlamaModel",
"LlamaPreTrainedModel",
"LlamaForSequenceClassification",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 226 |
def count_inversions_bf(arr):
    '''simple docstring'''
    # Brute force: check every pair (i, j) with i < j, which is O(n^2).
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions
def count_inversions_recursive(arr):
    '''simple docstring'''
    # Divide and conquer: count inversions inside each sorted half, then count
    # the inversions that cross the split while merging, O(n log n) overall.
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions
def _count_cross_inversions(p, q):
    '''simple docstring'''
    # Merge the two sorted halves while counting inversions whose elements
    # come from different halves.
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if p[i] > q[j], then p[k] > q[j] for all i <= k < len(p).
            # These are all inversions. The claim emerges from the
            # property that p is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1
    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion
def main():
    '''simple docstring'''
    arr_a = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 8
    print('''number of inversions = ''', num_inversions_bf)
    # testing an array with zero inversion (a sorted arr_a)
    arr_a.sort()
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 0
    print('''number of inversions = ''', num_inversions_bf)
    # an empty list should also have zero inversions
    arr_a = []
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 0
    print('''number of inversions = ''', num_inversions_bf)
if __name__ == "__main__":
main()
| 226 | 1 |
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCamelCase__ ( A__ ,unittest.TestCase ):
_SCREAMING_SNAKE_CASE : List[str] = RoCBertTokenizer
_SCREAMING_SNAKE_CASE : Optional[Any] = None
_SCREAMING_SNAKE_CASE : Optional[int] = False
_SCREAMING_SNAKE_CASE : int = True
_SCREAMING_SNAKE_CASE : Tuple = filter_non_english
def lowerCAmelCase (self : List[str] ):
super().setUp()
__a : Union[str, Any] = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """你""", """好""", """是""", """谁""", """a""", """b""", """c""", """d"""]
__a : Dict = {}
__a : Union[str, Any] = {}
for i, value in enumerate(__A ):
__a : List[Any] = i
__a : List[str] = i
__a : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__a : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_shape_file'''] )
__a : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_pronunciation_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
with open(self.word_shape_file , '''w''' , encoding='''utf-8''' ) as word_shape_writer:
json.dump(__A , __A , ensure_ascii=__A )
with open(self.word_pronunciation_file , '''w''' , encoding='''utf-8''' ) as word_pronunciation_writer:
json.dump(__A , __A , ensure_ascii=__A )
def lowerCAmelCase (self : Union[str, Any] ):
__a : Optional[Any] = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__a : Dict = tokenizer.tokenize('''你好[SEP]你是谁''' )
self.assertListEqual(__A , ['''你''', '''好''', '''[SEP]''', '''你''', '''是''', '''谁'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(__A ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(__A ) , [5, 6, 2, 5, 7, 8] )
def lowerCAmelCase (self : List[str] ):
__a : List[Any] = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def lowerCAmelCase (self : str ):
__a : str = RoCBertBasicTokenizer(do_lower_case=__A )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def lowerCAmelCase (self : Union[str, Any] ):
__a : Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=__A , strip_accents=__A )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def lowerCAmelCase (self : Tuple ):
__a : Dict = RoCBertBasicTokenizer(do_lower_case=__A , strip_accents=__A )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def lowerCAmelCase (self : Optional[Any] ):
__a : str = RoCBertBasicTokenizer(do_lower_case=__A )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def lowerCAmelCase (self : Optional[int] ):
__a : int = RoCBertBasicTokenizer(do_lower_case=__A )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def lowerCAmelCase (self : Dict ):
__a : Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=__A , strip_accents=__A )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def lowerCAmelCase (self : Any ):
__a : Any = RoCBertBasicTokenizer(do_lower_case=__A , strip_accents=__A )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def lowerCAmelCase (self : int ):
__a : int = RoCBertBasicTokenizer(do_lower_case=__A , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def lowerCAmelCase (self : int ):
__a : List[Any] = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
__a : Any = {}
for i, token in enumerate(__A ):
__a : int = i
__a : Union[str, Any] = RoCBertWordpieceTokenizer(vocab=__A , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def lowerCAmelCase (self : int ):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def lowerCAmelCase (self : List[str] ):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def lowerCAmelCase (self : Union[str, Any] ):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def lowerCAmelCase (self : Dict ):
__a : List[str] = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(__A ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
if self.test_rust_tokenizer:
__a : List[str] = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(__A ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
def lowerCAmelCase (self : str ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__a : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(__A , **__A )
__a : Union[str, Any] = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
__a : List[Any] = tokenizer_r.encode_plus(
__A , return_attention_mask=__A , return_token_type_ids=__A , return_offsets_mapping=__A , add_special_tokens=__A , )
__a : List[str] = tokenizer_r.do_lower_case if hasattr(__A , '''do_lower_case''' ) else False
__a : List[str] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), """Allen"""),
((2_1, 2_3), """##NL"""),
((2_3, 2_4), """##P"""),
((2_5, 3_3), """sentence"""),
((3_3, 3_4), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), """allen"""),
((2_1, 2_3), """##nl"""),
((2_3, 2_4), """##p"""),
((2_5, 3_3), """sentence"""),
((3_3, 3_4), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def lowerCAmelCase (self : str ):
__a : Tuple = ["""的""", """人""", """有"""]
__a : Optional[Any] = """""".join(__A )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__a : str = True
__a : Tuple = self.tokenizer_class.from_pretrained(__A , **__A )
__a : str = self.rust_tokenizer_class.from_pretrained(__A , **__A )
__a : Optional[Any] = tokenizer_p.encode(__A , add_special_tokens=__A )
__a : str = tokenizer_r.encode(__A , add_special_tokens=__A )
__a : List[Any] = tokenizer_r.convert_ids_to_tokens(__A )
__a : List[Any] = tokenizer_p.convert_ids_to_tokens(__A )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__A , __A )
self.assertListEqual(__A , __A )
__a : Union[str, Any] = False
__a : Tuple = self.rust_tokenizer_class.from_pretrained(__A , **__A )
__a : str = self.tokenizer_class.from_pretrained(__A , **__A )
__a : Dict = tokenizer_r.encode(__A , add_special_tokens=__A )
__a : Tuple = tokenizer_p.encode(__A , add_special_tokens=__A )
__a : Dict = tokenizer_r.convert_ids_to_tokens(__A )
__a : Optional[Any] = tokenizer_p.convert_ids_to_tokens(__A )
# it is expected that only the first Chinese character is not preceded by "##".
__a : int = [
f"##{token}" if idx != 0 else token for idx, token in enumerate(__A )
]
self.assertListEqual(__A , __A )
self.assertListEqual(__A , __A )
@slow
def lowerCAmelCase (self : str ):
__a : List[Any] = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__a : Tuple = tokenizer.encode('''你好''' , add_special_tokens=__A )
__a : Union[str, Any] = tokenizer.encode('''你是谁''' , add_special_tokens=__A )
__a : Dict = tokenizer.build_inputs_with_special_tokens(__A )
__a : List[str] = tokenizer.build_inputs_with_special_tokens(__A , __A )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def lowerCAmelCase (self : Any ):
__a : Union[str, Any] = self.get_tokenizers(do_lower_case=__A )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
__a : str = """你好,你是谁"""
__a : List[str] = tokenizer.tokenize(__A )
__a : Any = tokenizer.convert_tokens_to_ids(__A )
__a : Tuple = tokenizer.convert_tokens_to_shape_ids(__A )
__a : Dict = tokenizer.convert_tokens_to_pronunciation_ids(__A )
__a : List[Any] = tokenizer.prepare_for_model(
__A , __A , __A , add_special_tokens=__A )
__a : Union[str, Any] = tokenizer.encode_plus(__A , add_special_tokens=__A )
self.assertEqual(__A , __A )
| 351 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( unittest.TestCase ):
@slow
def lowerCAmelCase (self : Tuple ):
__a : List[str] = AutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''' , return_dict=snake_case_ ).to(snake_case_ )
__a : List[Any] = AutoTokenizer.from_pretrained('''google/mt5-small''' )
__a : Optional[int] = tokenizer('''Hello there''' , return_tensors='''pt''' ).input_ids
__a : Dict = tokenizer('''Hi I am''' , return_tensors='''pt''' ).input_ids
__a : Optional[Any] = model(input_ids.to(snake_case_ ) , labels=labels.to(snake_case_ ) ).loss
__a : Tuple = -(labels.shape[-1] * loss.item())
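        # loss is the mean NLL per label token, so scaling by the label length recovers the summed log-likelihood behind the mtf reference score.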
__a : Dict = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 90 | 0 |
def solution() -> int:
    """simple docstring"""
    # Project Euler 19: count the Sundays that fell on the first of the month
    # during the twentieth century. 6 Jan 1901 was the first Sunday of the
    # century, so start there and step a week at a time.
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 19_01
    sundays = 0
    while year < 20_01:
        day += 7
        if (year % 4 == 0 and year % 1_00 != 0) or (year % 4_00 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 20_01 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution())
| 310 |
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class __lowerCamelCase :
_lowercase = XGLMConfig
_lowercase = {}
_lowercase = """gelu"""
def __init__( self: Optional[int],A_: Dict,A_: Any=14,A_: Optional[int]=7,A_: str=True,A_: Any=True,A_: Optional[int]=True,A_: Optional[int]=99,A_: List[str]=32,A_: Any=2,A_: Tuple=4,A_: List[str]=37,A_: Dict="gelu",A_: int=0.1,A_: List[str]=0.1,A_: int=512,A_: List[Any]=0.0_2,):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_input_mask
__UpperCamelCase = use_labels
__UpperCamelCase = vocab_size
__UpperCamelCase = d_model
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = ffn_dim
__UpperCamelCase = activation_function
__UpperCamelCase = activation_dropout
__UpperCamelCase = attention_dropout
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = initializer_range
__UpperCamelCase = None
__UpperCamelCase = 0
__UpperCamelCase = 2
__UpperCamelCase = 1
def snake_case_ ( self: Dict ):
'''simple docstring'''
return XGLMConfig.from_pretrained('facebook/xglm-564M' )
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length],self.vocab_size ),clip_value_min=0,clip_value_max=3 )
__UpperCamelCase = None
if self.use_input_mask:
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase = self.get_config()
__UpperCamelCase = floats_tensor([self.num_hidden_layers, self.num_attention_heads],2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
return XGLMConfig(
vocab_size=self.vocab_size,d_model=self.hidden_size,num_layers=self.num_hidden_layers,attention_heads=self.num_attention_heads,ffn_dim=self.ffn_dim,activation_function=self.activation_function,activation_dropout=self.activation_dropout,attention_dropout=self.attention_dropout,max_position_embeddings=self.max_position_embeddings,initializer_range=self.initializer_range,use_cache=A_,bos_token_id=self.bos_token_id,eos_token_id=self.eos_token_id,pad_token_id=self.pad_token_id,return_dict=A_,)
def snake_case_ ( self: int ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'head_mask': head_mask,
        }
        return config, inputs_dict
@require_tf
class __lowerCamelCase (_a , _a , unittest.TestCase ):
_lowercase = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
_lowercase = (TFXGLMForCausalLM,) if is_tf_available() else ()
_lowercase = (
{"""feature-extraction""": TFXGLMModel, """text-generation""": TFXGLMForCausalLM} if is_tf_available() else {}
)
_lowercase = False
_lowercase = False
_lowercase = False
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = TFXGLMModelTester(self )
__UpperCamelCase = ConfigTester(self,config_class=A_,n_embd=37 )
def snake_case_ ( self: Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
@slow
def snake_case_ ( self: Any ):
'''simple docstring'''
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = TFXGLMModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' )
def snake_case_ ( self: Tuple ):
'''simple docstring'''
super().test_resize_token_embeddings()
@require_tf
class __lowerCamelCase (unittest.TestCase ):
@slow
def snake_case_ ( self: Optional[Any],A_: int=True ):
'''simple docstring'''
__UpperCamelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase = tf.convert_to_tensor([[2, 268, 9865]],dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
__UpperCamelCase = [2, 268, 9865, 67, 11, 1988, 5_7252, 9865, 5, 984, 67, 1988, 21_3838, 1658, 53, 7_0446, 33, 6657, 278, 1581]
# fmt: on
__UpperCamelCase = model.generate(A_,do_sample=A_,num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist(),A_ )
@slow
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
tf.random.set_seed(0 )
__UpperCamelCase = tokenizer('Today is a nice day and',return_tensors='tf' )
__UpperCamelCase = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(':/CPU:0' ):
__UpperCamelCase = model.generate(A_,do_sample=A_,seed=[7, 0] )
__UpperCamelCase = tokenizer.decode(output_ids[0],skip_special_tokens=A_ )
__UpperCamelCase = (
'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
)
self.assertEqual(A_,A_ )
@slow
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase = 'left'
# use different length sentences to test batching
__UpperCamelCase = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When',
'Hello, my dog is a little',
]
__UpperCamelCase = tokenizer(A_,return_tensors='tf',padding=A_ )
__UpperCamelCase = inputs['input_ids']
__UpperCamelCase = model.generate(input_ids=A_,attention_mask=inputs['attention_mask'],max_new_tokens=12 )
__UpperCamelCase = tokenizer(sentences[0],return_tensors='tf' ).input_ids
__UpperCamelCase = model.generate(input_ids=A_,max_new_tokens=12 )
__UpperCamelCase = tokenizer(sentences[1],return_tensors='tf' ).input_ids
__UpperCamelCase = model.generate(input_ids=A_,max_new_tokens=12 )
__UpperCamelCase = tokenizer.batch_decode(A_,skip_special_tokens=A_ )
__UpperCamelCase = tokenizer.decode(output_non_padded[0],skip_special_tokens=A_ )
__UpperCamelCase = tokenizer.decode(output_padded[0],skip_special_tokens=A_ )
__UpperCamelCase = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
'a single',
'Hello, my dog is a little bit of a shy one, but he is very friendly',
]
self.assertListEqual(A_,A_ )
self.assertListEqual(A_,[non_padded_sentence, padded_sentence] )
| 310 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
'tokenization_m2m_100': ['M2M100Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_m2m_100"] = [
'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
'M2M100ForConditionalGeneration',
'M2M100Model',
'M2M100PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 28 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class __a ( __UpperCamelCase ):
    def __init__( self : Union[str, Any] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase_ : Dict ):
        warnings.warn(
            """The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use GLPNImageProcessor instead.""" , FutureWarning , )
        super().__init__(*UpperCAmelCase , **UpperCAmelCase_ )
| 28 | 1 |
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
a_ = True
except (ImportError, AttributeError):
a_ = object
def __UpperCAmelCase ( *__UpperCamelCase , **__UpperCamelCase ):
pass
a_ = False
a_ = logging.get_logger('transformers-cli/serving')
def __UpperCAmelCase ( __UpperCamelCase ):
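    # Factory wired to the argparse sub-command: build the pipeline once, then hand it to ServeCommand.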
__lowercase : Optional[Any] = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
return ServeCommand(__UpperCamelCase , args.host , args.port , args.workers )
class UpperCAmelCase_ ( snake_case ):
UpperCamelCase =42
class UpperCAmelCase_ ( snake_case ):
UpperCamelCase =42
UpperCamelCase =42
class UpperCAmelCase_ ( snake_case ):
UpperCamelCase =42
class UpperCAmelCase_ ( snake_case ):
UpperCamelCase =42
class UpperCAmelCase_ ( snake_case ):
@staticmethod
def _lowerCamelCase ( UpperCamelCase_ ) -> Tuple:
__lowercase : Dict = parser.add_parser(
'''serve''' , help='''CLI tool to run inference requests through REST and GraphQL endpoints.''' )
serve_parser.add_argument(
'''--task''' , type=UpperCamelCase_ , choices=get_supported_tasks() , help='''The task to run the pipeline on''' , )
serve_parser.add_argument('''--host''' , type=UpperCamelCase_ , default='''localhost''' , help='''Interface the server will listen on.''' )
serve_parser.add_argument('''--port''' , type=UpperCamelCase_ , default=88_88 , help='''Port the serving will listen to.''' )
serve_parser.add_argument('''--workers''' , type=UpperCamelCase_ , default=1 , help='''Number of http workers''' )
serve_parser.add_argument('''--model''' , type=UpperCamelCase_ , help='''Model\'s name or path to stored model.''' )
serve_parser.add_argument('''--config''' , type=UpperCamelCase_ , help='''Model\'s config name or path to stored model.''' )
serve_parser.add_argument('''--tokenizer''' , type=UpperCamelCase_ , help='''Tokenizer name to use.''' )
serve_parser.add_argument(
'''--device''' , type=UpperCamelCase_ , default=-1 , help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''' , )
serve_parser.set_defaults(func=UpperCamelCase_ )
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Any:
__lowercase : List[Any] = pipeline
__lowercase : str = host
__lowercase : List[str] = port
__lowercase : str = workers
if not _serve_dependencies_installed:
raise RuntimeError(
'''Using serve command requires FastAPI and uvicorn. '''
'''Please install transformers with [serving]: pip install "transformers[serving]".'''
'''Or install FastAPI and uvicorn separately.''' )
else:
logger.info(F"""Serving model over {host}:{port}""" )
__lowercase : int = FastAPI(
routes=[
APIRoute(
'''/''' , self.model_info , response_model=UpperCamelCase_ , response_class=UpperCamelCase_ , methods=['''GET'''] , ),
APIRoute(
'''/tokenize''' , self.tokenize , response_model=UpperCamelCase_ , response_class=UpperCamelCase_ , methods=['''POST'''] , ),
APIRoute(
'''/detokenize''' , self.detokenize , response_model=UpperCamelCase_ , response_class=UpperCamelCase_ , methods=['''POST'''] , ),
APIRoute(
'''/forward''' , self.forward , response_model=UpperCamelCase_ , response_class=UpperCamelCase_ , methods=['''POST'''] , ),
] , timeout=6_00 , )
def _lowerCamelCase ( self ) -> Union[str, Any]:
run(self._app , host=self.host , port=self.port , workers=self.workers )
def _lowerCamelCase ( self ) -> Tuple:
return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
def _lowerCamelCase ( self , UpperCamelCase_ = Body(UpperCamelCase_ , embed=UpperCamelCase_ ) , UpperCamelCase_ = Body(UpperCamelCase_ , embed=UpperCamelCase_ ) ) -> Optional[int]:
try:
__lowercase : Any = self._pipeline.tokenizer.tokenize(UpperCamelCase_ )
if return_ids:
__lowercase : Dict = self._pipeline.tokenizer.convert_tokens_to_ids(UpperCamelCase_ )
return ServeTokenizeResult(tokens=UpperCamelCase_ , tokens_ids=UpperCamelCase_ )
else:
return ServeTokenizeResult(tokens=UpperCamelCase_ )
except Exception as e:
raise HTTPException(status_code=5_00 , detail={'''model''': '''''', '''error''': str(UpperCamelCase_ )} )
def _lowerCamelCase ( self , UpperCamelCase_ = Body(UpperCamelCase_ , embed=UpperCamelCase_ ) , UpperCamelCase_ = Body(UpperCamelCase_ , embed=UpperCamelCase_ ) , UpperCamelCase_ = Body(UpperCamelCase_ , embed=UpperCamelCase_ ) , ) -> Dict:
try:
__lowercase : Tuple = self._pipeline.tokenizer.decode(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return ServeDeTokenizeResult(model='''''' , text=UpperCamelCase_ )
except Exception as e:
raise HTTPException(status_code=5_00 , detail={'''model''': '''''', '''error''': str(UpperCamelCase_ )} )
async def _lowerCamelCase ( self , UpperCamelCase_=Body(UpperCamelCase_ , embed=UpperCamelCase_ ) ) -> Union[str, Any]:
# Check we don't have empty string
if len(UpperCamelCase_ ) == 0:
return ServeForwardResult(output=[] , attention=[] )
try:
# Forward through the model
__lowercase : Optional[Any] = self._pipeline(UpperCamelCase_ )
return ServeForwardResult(output=UpperCamelCase_ )
except Exception as e:
raise HTTPException(5_00 , {'''error''': str(UpperCamelCase_ )} )
| 249 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
a_ = logging.get_logger(__name__)
@dataclass
class UpperCAmelCase_ ( snake_case ):
UpperCamelCase =[
"no_inference",
"no_cuda",
"no_tpu",
"no_speed",
"no_memory",
"no_env_print",
"no_multi_process",
]
def __init__( self , **UpperCamelCase_ ) -> Optional[Any]:
for deprecated_arg in self.deprecated_args:
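            # Map legacy negated flags (e.g. no_cuda) onto their positive counterparts before the dataclass consumes them.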
if deprecated_arg in kwargs:
__lowercase : Union[str, Any] = deprecated_arg[3:]
setattr(self , UpperCamelCase_ , not kwargs.pop(UpperCamelCase_ ) )
logger.warning(
                F"""{deprecated_arg} is deprecated. Please use --no_{positive_arg} or"""
F""" {positive_arg}={kwargs[positive_arg]}""" )
__lowercase : Dict = kwargs.pop('''torchscript''' , self.torchscript )
__lowercase : str = kwargs.pop('''torch_xla_tpu_print_metrics''' , self.torch_xla_tpu_print_metrics )
__lowercase : str = kwargs.pop('''fp16_opt_level''' , self.fpaa_opt_level )
super().__init__(**UpperCamelCase_ )
UpperCamelCase =field(default=snake_case , metadata={"help": "Trace the models using torchscript"} )
UpperCamelCase =field(default=snake_case , metadata={"help": "Print Xla/PyTorch tpu metrics"} )
UpperCamelCase =field(
default="O1" , metadata={
"help": (
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
"See details at https://nvidia.github.io/apex/amp.html"
)
} , )
@cached_property
def _lowerCamelCase ( self ) -> Tuple["torch.device", int]:
requires_backends(self , ['''torch'''] )
logger.info('''PyTorch: setting up devices''' )
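        # Resolution order: an explicit CPU request wins, then TPU if available, otherwise CUDA with a CPU fallback.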
if not self.cuda:
__lowercase : str = torch.device('''cpu''' )
__lowercase : Optional[Any] = 0
elif is_torch_tpu_available():
__lowercase : str = xm.xla_device()
__lowercase : Any = 0
else:
__lowercase : List[str] = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
__lowercase : Any = torch.cuda.device_count()
return device, n_gpu
@property
def _lowerCamelCase ( self ) -> Union[str, Any]:
return is_torch_tpu_available() and self.tpu
@property
def _lowerCamelCase ( self ) -> int:
requires_backends(self , ['''torch'''] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def _lowerCamelCase ( self ) -> "torch.device":
requires_backends(self , ['''torch'''] )
return self._setup_devices[0]
@property
def _lowerCamelCase ( self ) -> int:
requires_backends(self , ['''torch'''] )
return self._setup_devices[1]
@property
def _lowerCamelCase ( self ) -> Dict:
return self.n_gpu > 0
| 249 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
lowercase__ = tempfile.mkdtemp()
# fmt: off
lowercase__ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
# fmt: on
lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
lowercase__ = {
"do_resize": True,
"size": {"height": 18, "width": 18},
"do_normalize": True,
"image_mean": [0.5, 0.5, 0.5],
"image_std": [0.5, 0.5, 0.5],
}
lowercase__ = os.path.join(self.tmpdirname , _lowercase )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(_lowercase , _lowercase )
def UpperCAmelCase ( self :int , **_lowercase :int ):
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **_lowercase )
def UpperCAmelCase ( self :str , **_lowercase :List[str] ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_lowercase )
def UpperCAmelCase ( self :int ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase ( self :Any ):
'''simple docstring'''
lowercase__ = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
lowercase__ = [Image.fromarray(np.moveaxis(_lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase ( self :str ):
'''simple docstring'''
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_image_processor()
lowercase__ = VisionTextDualEncoderProcessor(tokenizer=_lowercase , image_processor=_lowercase )
processor.save_pretrained(self.tmpdirname )
lowercase__ = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowercase )
def UpperCAmelCase ( self :Union[str, Any] ):
'''simple docstring'''
lowercase__ = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase__ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowercase__ = self.get_image_processor(do_normalize=_lowercase , padding_value=1.0 )
lowercase__ = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=_lowercase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowercase )
def UpperCAmelCase ( self :Optional[Any] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = VisionTextDualEncoderProcessor(tokenizer=_lowercase , image_processor=_lowercase )
lowercase__ = self.prepare_image_inputs()
lowercase__ = image_processor(_lowercase , return_tensors="np" )
lowercase__ = processor(images=_lowercase , return_tensors="np" )
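        # The processor should forward image preprocessing verbatim to the wrapped image processor.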
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase ( self :Optional[int] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = VisionTextDualEncoderProcessor(tokenizer=_lowercase , image_processor=_lowercase )
lowercase__ = "lower newer"
lowercase__ = processor(text=_lowercase )
lowercase__ = tokenizer(_lowercase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = VisionTextDualEncoderProcessor(tokenizer=_lowercase , image_processor=_lowercase )
lowercase__ = "lower newer"
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=_lowercase , images=_lowercase )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with self.assertRaises(_lowercase ):
processor()
def UpperCAmelCase ( self :Any ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = VisionTextDualEncoderProcessor(tokenizer=_lowercase , image_processor=_lowercase )
lowercase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ = processor.batch_decode(_lowercase )
lowercase__ = tokenizer.batch_decode(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = VisionTextDualEncoderProcessor(tokenizer=_lowercase , image_processor=_lowercase )
lowercase__ = "lower newer"
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=_lowercase , images=_lowercase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 366 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_autoformer""": [
"""AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AutoformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
"""AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AutoformerForPrediction""",
"""AutoformerModel""",
"""AutoformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 201 | 0 |
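The init module above defers the heavy torch-backed imports until first use via _LazyModule. A minimal sketch of the same idea using a module-level __getattr__ (PEP 562); this illustrates the mechanism only and is not the transformers implementation, and it assumes a sibling heavy_module exists:

# lazy_pkg/__init__.py
import importlib
from typing import TYPE_CHECKING

_import_structure = {
    "heavy_module": ["ExpensiveModel"],
}

if TYPE_CHECKING:
    # Static type checkers see the real names; nothing is imported at runtime.
    from .heavy_module import ExpensiveModel
else:
    _name_to_module = {
        name: module for module, names in _import_structure.items() for name in names
    }

    def __getattr__(name):  # called only when a normal attribute lookup misses
        if name in _name_to_module:
            module = importlib.import_module(f".{_name_to_module[name]}", __name__)
            return getattr(module, name)
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")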
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    'artists_file': 'artists.json',
    'lyrics_file': 'lyrics.json',
    'genres_file': 'genres.json',
}

PRETRAINED_VOCAB_FILES_MAP = {
    'artists_file': {
        'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json',
    },
    'genres_file': {
        'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json',
    },
    'lyrics_file': {
        'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json',
    },
}

PRETRAINED_LYRIC_TOKENS_SIZES = {
    'jukebox': 512,
}


class JukeboxTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_lyric_input_size = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, artists_file, genres_file, lyrics_file, version=["v3", "v2", "v2"], max_n_lyric_tokens=512, n_genres=5, unk_token="<|endoftext|>", **kwargs):
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            unk_token=unk_token, n_genres=n_genres, version=version, max_n_lyric_tokens=max_n_lyric_tokens, **kwargs
        )
        self.version = version
        self.max_n_lyric_tokens = max_n_lyric_tokens
        self.n_genres = n_genres
        with open(artists_file, encoding='utf-8') as vocab_handle:
            self.artists_encoder = json.load(vocab_handle)
        with open(genres_file, encoding='utf-8') as vocab_handle:
            self.genres_encoder = json.load(vocab_handle)
        with open(lyrics_file, encoding='utf-8') as vocab_handle:
            self.lyrics_encoder = json.load(vocab_handle)
        oov = r'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+'
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder) == 79:
            oov = oov.replace(r'\-\'', r'\-+\'')
        self.out_of_vocab = regex.compile(oov)
        self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
        self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
        self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}
    @property
    def vocab_size(self):
        return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)

    def get_vocab(self):
        # dict(a, b, c) on three dicts is a TypeError; return the vocabularies explicitly.
        return {
            "artists_encoder": self.artists_encoder,
            "genres_encoder": self.genres_encoder,
            "lyrics_encoder": self.lyrics_encoder,
        }
    def _convert_token_to_id(self, list_artists, list_genres, list_lyrics):
        artists_id = [self.artists_encoder.get(artist, 0) for artist in list_artists]
        for genres in range(len(list_genres)):
            list_genres[genres] = [self.genres_encoder.get(genre, 0) for genre in list_genres[genres]]
            list_genres[genres] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))
        lyric_ids = [[self.lyrics_encoder.get(character, 0) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids

    def _tokenize(self, lyrics):
        return list(lyrics)
    def tokenize(self, artist, genre, lyrics, **kwargs):
        artist, genre, lyrics = self.prepare_for_tokenization(artist, genre, lyrics)
        lyrics = self._tokenize(lyrics)
        return artist, genre, lyrics
    def prepare_for_tokenization(self, artists, genres, lyrics, is_split_into_words=False):
        for idx in range(len(self.version)):
            if self.version[idx] == "v3":
                artists[idx] = artists[idx].lower()
                genres[idx] = [genres[idx].lower()]
            else:
                artists[idx] = self._normalize(artists[idx]) + '.v2'
                genres[idx] = [
                    self._normalize(genre) + '.v2' for genre in genres[idx].split('_')
                ]  # split is for the full dictionary with combined genres
        if self.version[0] == "v2":
            self.out_of_vocab = regex.compile(r'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+')
            vocab = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'
            self.vocab = {vocab[index]: index + 1 for index in range(len(vocab))}
            self.vocab['<unk>'] = 0
            self.n_vocab = len(vocab) + 1
            self.lyrics_encoder = self.vocab
            self.lyrics_decoder = {v: k for k, v in self.vocab.items()}
            self.lyrics_decoder[0] = ''
        else:
            self.out_of_vocab = regex.compile(r'[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+')
        lyrics = self._run_strip_accents(lyrics)
        lyrics = lyrics.replace('\\', '\n')
        lyrics = self.out_of_vocab.sub('', lyrics), [], []
        return artists, genres, lyrics
    def _run_strip_accents(self, text):
        text = unicodedata.normalize('NFD', text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)
    def _normalize(self, text):
        accepted = (
            [chr(i) for i in range(ord('a'), ord('z') + 1)]
            + [chr(i) for i in range(ord('A'), ord('Z') + 1)]
            + [chr(i) for i in range(ord('0'), ord('9') + 1)]
            + ['.']
        )
        accepted = frozenset(accepted)
        pattern = re.compile(r'_+')
        text = ''.join([c if c in accepted else '_' for c in text.lower()])
        text = pattern.sub('_', text).strip('_')
        return text
    def convert_lyric_tokens_to_string(self, lyrics):
        return " ".join(lyrics)
    def convert_to_tensors(self, inputs, tensor_type=None, prepend_batch_axis=False):
        # Convert to TensorType
        if not isinstance(tensor_type, TensorType):
            tensor_type = TensorType(tensor_type)
        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    'Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.')
            import tensorflow as tf

            as_tensor = tf.constant
            is_tensor = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.')
            import torch

            as_tensor = torch.tensor
            is_tensor = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.')
            import jax.numpy as jnp  # noqa: F811

            as_tensor = jnp.array
            is_tensor = _is_jax
        else:
            as_tensor = np.asarray
            is_tensor = _is_numpy
        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                inputs = [inputs]
            if not is_tensor(inputs):
                inputs = as_tensor(inputs)
        except:  # noqa E722
            raise ValueError(
                'Unable to create tensor, you should probably activate truncation and/or padding '
                'with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.')
        return inputs
    def __call__(self, artist, genres, lyrics="", return_tensors="pt"):
        input_ids = [0, 0, 0]
        artist = [artist] * len(self.version)
        genres = [genres] * len(self.version)
        artists_tokens, genres_tokens, lyrics_tokens = self.tokenize(artist, genres, lyrics)
        artists_id, genres_ids, full_tokens = self._convert_token_to_id(artists_tokens, genres_tokens, lyrics_tokens)
        attention_masks = [-INFINITY] * len(full_tokens[-1])
        input_ids = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]], tensor_type=return_tensors
            )
            for i in range(len(self.version))
        ]
        return BatchEncoding({'input_ids': input_ids, 'attention_masks': attention_masks})
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        artists_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['artists_file'])
        with open(artists_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.artists_encoder, ensure_ascii=False))
        genres_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['genres_file'])
        with open(genres_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.genres_encoder, ensure_ascii=False))
        lyrics_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['lyrics_file'])
        with open(lyrics_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.lyrics_encoder, ensure_ascii=False))
        return (artists_file, genres_file, lyrics_file)
    def _convert_id_to_token(self, artists_index, genres_index, lyric_index):
        artist = self.artists_decoder.get(artists_index)
        genres = [self.genres_decoder.get(genre) for genre in genres_index]
        lyrics = [self.lyrics_decoder.get(character) for character in lyric_index]
        return artist, genres, lyrics
| 216 |
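The accent-stripping step used by the tokenizer above (NFD decomposition, then dropping combining marks of Unicode category "Mn") also works as a standalone helper:

import unicodedata


def strip_accents(text: str) -> str:
    decomposed = unicodedata.normalize("NFD", text)
    return "".join(ch for ch in decomposed if unicodedata.category(ch) != "Mn")


assert strip_accents("cécé herlolip") == "cece herlolip"
assert strip_accents("Beyoncé") == "Beyonce"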
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new('RGB', (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 216 | 1 |
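A quick, self-contained sanity check of the escape-time idea used above: points inside the Mandelbrot set never escape |z| > 2, while points far outside escape immediately. The last loop prints a coarse ASCII rendering of the classic view:

def escape_step(x: float, y: float, max_step: int) -> int:
    a, b = x, y
    for step in range(max_step):
        a, b = a * a - b * b + x, 2 * a * b + y
        if a * a + b * b > 4:
            return step
    return max_step


assert escape_step(0.0, 0.0, 50) == 50  # the origin is in the set
assert escape_step(2.0, 2.0, 50) == 0   # far outside: escapes at once

for row in range(11):
    y = -1.1 + 0.22 * row
    print("".join("#" if escape_step(-2.0 + 0.05 * col, y, 30) == 30 else "." for col in range(60)))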
'''simple docstring'''
def greatest_common_divisor(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    if greatest_common_divisor(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
| 367 |
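The extended-Euclid inverse above can be cross-checked against Python's built-in modular inverse, pow(a, -1, m), available since Python 3.8; both must satisfy the defining identity (a * inv) % m == 1:

for a, m in [(3, 7), (7, 31), (10, 17)]:
    inv = pow(a, -1, m)  # built-in modular inverse
    assert (a * inv) % m == 1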
'''simple docstring'''
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def read(dataset: datasets.Dataset, length: int):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length: int, batch_size: int):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length: int, type: str):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length: int, batch_size: int, type: str):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES, seq_shapes={"list": (100,)}
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)
        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)
    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 220 | 0 |
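The benchmark above imports get_duration from a local utils module that is not shown. A plausible minimal implementation is sketched here; it is an assumption about that file, not its actual contents. It replaces the wrapped function's return value with the elapsed wall time, which matches how the benchmark stores timings:

import time
from functools import wraps


def get_duration(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        func(*args, **kwargs)  # result is discarded; only the timing is kept
        return time.perf_counter() - start  # seconds elapsed

    return wrapper


@get_duration
def busy_loop(n):
    sum(range(n))


print(f"busy_loop took {busy_loop(1_000_000):.4f}s")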
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/maskformer-swin-base-ade': (
'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
logger = logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig):
    model_type = 'maskformer'
    attribute_map = {'hidden_size': 'mask_feature_size'}
    backbones_supported = ['resnet', 'swin']
    decoders_supported = ['detr']
    def __init__(
        self,
        fpn_feature_size=256,
        mask_feature_size=256,
        no_object_weight=0.1,
        use_auxiliary_loss=False,
        backbone_config=None,
        decoder_config=None,
        init_std=0.02,
        init_xavier_std=1.0,
        dice_weight=1.0,
        cross_entropy_weight=1.0,
        mask_weight=20.0,
        output_auxiliary_logits=None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384, in_channels=3, patch_size=4, embed_dim=128, depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32], window_size=12, drop_path_rate=0.3, out_features=["stage1", "stage2", "stage3", "stage4"],
            )
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '
                f'Supported model types: {",".join(self.backbones_supported)}')
        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f'Transformer Decoder {decoder_type} not supported, please use one of'
                    f' {",".join(self.decoders_supported)}')
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)
        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)
    @classmethod
    def from_backbone_and_decoder_configs(cls, backbone_config, decoder_config, **kwargs):
        return cls(
            backbone_config=backbone_config, decoder_config=decoder_config, **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output['backbone_config'] = self.backbone_config.to_dict()
        output['decoder_config'] = self.decoder_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
| 147 |
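The config above composes a backbone and a decoder config, then deep-copies both into one plain dict for serialization. A minimal sketch of that composition pattern with toy classes; the Toy* names are invented for illustration and are not the transformers API:

import copy
import json


class ToyBackboneConfig:
    def __init__(self, depths=(2, 2, 6, 2)):
        self.model_type = "toy-backbone"
        self.depths = list(depths)

    def to_dict(self):
        return copy.deepcopy(self.__dict__)


class ToyComposedConfig:
    model_type = "toy-composed"

    def __init__(self, backbone_config=None):
        # Fall back to a default backbone when none is given, as above.
        self.backbone_config = backbone_config or ToyBackboneConfig()

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()  # nested dict
        output["model_type"] = self.model_type
        return output


print(json.dumps(ToyComposedConfig().to_dict(), indent=2))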
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/xglm-564M': 2_048,
}
class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs=None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f'<madeupword{i}>' for i in range(self.num_madeup_words)]
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
        sp_size = len(self.sp_model)
        madeup_words = {f'<madeupword{i}>': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state['sp_model'] = None
        state['sp_model_proto'] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]
    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 147 | 1 |
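The tokenizer above aligns a SentencePiece vocabulary with a fairseq vocabulary by pinning four special tokens and shifting every SentencePiece id by a fixed offset. A toy illustration of that id arithmetic with made-up vocabularies (assumed values, not the real model's vocab):

fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
fairseq_offset = 1  # the first real SP piece lands one position later in fairseq

sp_vocab = {"<unk>": 0, "<s>": 1, "</s>": 2, ",": 3, ".": 4}  # toy SP model


def token_to_id(token: str) -> int:
    if token in fairseq_tokens_to_ids:
        return fairseq_tokens_to_ids[token]
    spm_id = sp_vocab.get(token, 0)
    # SP id 0 means unknown; anything else is shifted by the offset.
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids["<unk>"]


assert token_to_id("<pad>") == 1  # pinned special token
assert token_to_id(",") == 4      # SP id 3 + offset 1
assert token_to_id("xyzzy") == 3  # unknown falls back to <unk>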
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_opt"] = [
'''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OPTForCausalLM''',
'''OPTModel''',
'''OPTPreTrainedModel''',
'''OPTForSequenceClassification''',
'''OPTForQuestionAnswering''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_opt"] = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_opt"] = [
'''FlaxOPTForCausalLM''',
'''FlaxOPTModel''',
'''FlaxOPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 73 |
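Each optional branch above is gated on an availability check such as is_torch_available(). One way to perform that kind of check without actually importing the package is importlib.util.find_spec, sketched here (transformers uses its own helper functions, so this is an illustration of the idea only):

import importlib.util


def is_available(package_name: str) -> bool:
    # find_spec locates the package on sys.path without importing it
    return importlib.util.find_spec(package_name) is not None


backends = {name: is_available(name) for name in ("numpy", "torch", "tensorflow", "jax")}
print(backends)  # e.g. {'numpy': True, 'torch': False, ...} depending on the environment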
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ['pixel_values']
    def __init__(self, do_resize=True, size=None, crop_pct=None, resample=PILImageResampling.BILINEAR, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=None, image_std=None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 384}
        size = get_size_dict(size, default_to_square=False)
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image, size, crop_pct, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"""Size dictionary must contain 'shortest_edge' key. Got {size.keys()}""")
        shortest_edge = size['shortest_edge']
        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs)
    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images, do_resize=None, size=None, crop_pct=None, resample=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and (size is None or resample is None):
            raise ValueError('Size and resample must be specified if do_resize is True.')
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError('crop_pct must be specified if size < 384.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 73 | 1 |
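The processor above resizes so that the shortest edge matches a target while preserving the aspect ratio. The size arithmetic in isolation:

def shortest_edge_size(height: int, width: int, shortest_edge: int) -> tuple[int, int]:
    short, long = (height, width) if height <= width else (width, height)
    new_short = shortest_edge
    new_long = int(shortest_edge * long / short)  # scale the long side proportionally
    return (new_short, new_long) if height <= width else (new_long, new_short)


assert shortest_edge_size(480, 640, 384) == (384, 512)
assert shortest_edge_size(640, 480, 384) == (512, 384)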
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
pass
| 48 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"adapter_layer": "encoder.layers.*.adapter_layer",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
"pooling_layer.linear": "projector",
"pooling_layer.projection": "classifier",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"projector",
"classifier",
]
def read_txt_into_dict(filename):
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Dict:
for attribute in key.split(""".""" ):
a = getattr(__lowerCamelCase , __lowerCamelCase )
a = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__lowerCamelCase ):
a = PARAM_MAPPING[full_name.split(""".""" )[-1]]
a = """param"""
if weight_type is not None and weight_type != "param":
a = getattr(__lowerCamelCase , __lowerCamelCase ).shape
elif weight_type is not None and weight_type == "param":
a = hf_pointer
for attribute in hf_param_name.split(""".""" ):
a = getattr(__lowerCamelCase , __lowerCamelCase )
a = shape_pointer.shape
# let's reduce dimension
a = value[0]
else:
a = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
a = value
elif weight_type == "weight_g":
a = value
elif weight_type == "weight_v":
a = value
elif weight_type == "bias":
a = value
elif weight_type == "param":
for attribute in hf_param_name.split(""".""" ):
a = getattr(__lowerCamelCase , __lowerCamelCase )
a = value
else:
a = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]:
a = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__lowerCamelCase ):
a = PARAM_MAPPING[full_name.split(""".""" )[-1]]
a = """param"""
if weight_type is not None and weight_type != "param":
a = """.""".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
a = """.""".join([key, hf_param_name] )
else:
a = key
a = value if """lm_head""" in full_key else value[0]
PARAM_MAPPING = {
"W_a": "linear_1.weight",
"W_b": "linear_2.weight",
"b_a": "linear_1.bias",
"b_b": "linear_2.bias",
"ln_W": "norm.weight",
"ln_b": "norm.bias",
}
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None ) -> Optional[Any]:
a = False
for key, mapped_key in MAPPING.items():
a = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
a = True
if "*" in mapped_key:
a = name.split(__lowerCamelCase )[0].split(""".""" )[-2]
a = mapped_key.replace("""*""" , __lowerCamelCase )
if "weight_g" in name:
a = """weight_g"""
elif "weight_v" in name:
a = """weight_v"""
elif "bias" in name:
a = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
a = """weight"""
else:
a = None
if hf_dict is not None:
rename_dict(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
else:
set_recursively(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
    return is_used
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
a = []
a = fairseq_model.state_dict()
a = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
a = False
if "conv_layers" in name:
load_conv_layer(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , hf_model.config.feat_extract_norm == """group""" , )
a = True
else:
a = load_wavaveca_layer(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if not is_used:
unused_weights.append(__lowerCamelCase )
logger.warning(f'Unused weights: {unused_weights}' )
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Any:
a = full_name.split("""conv_layers.""" )[-1]
a = name.split(""".""" )
a = int(items[0] )
a = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
a = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
a = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
a = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
a = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__lowerCamelCase )
@torch.no_grad()
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=True , __lowerCamelCase=False ) -> List[Any]:
if config_path is not None:
a = WavaVecaConfig.from_pretrained(__lowerCamelCase )
else:
a = WavaVecaConfig()
if is_seq_class:
a = read_txt_into_dict(__lowerCamelCase )
a = idalabel
a = WavaVecaForSequenceClassification(__lowerCamelCase )
a = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__lowerCamelCase , return_attention_mask=__lowerCamelCase , )
feature_extractor.save_pretrained(__lowerCamelCase )
elif is_finetuned:
if dict_path:
a = Dictionary.load(__lowerCamelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
a = target_dict.pad_index
a = target_dict.bos_index
a = target_dict.eos_index
a = len(target_dict.symbols )
a = os.path.join(__lowerCamelCase , """vocab.json""" )
if not os.path.isdir(__lowerCamelCase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(__lowerCamelCase ) )
return
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
a = target_dict.indices
# fairseq has the <pad> and <s> switched
a = 0
a = 1
with open(__lowerCamelCase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(__lowerCamelCase , __lowerCamelCase )
a = WavaVecaCTCTokenizer(
__lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=__lowerCamelCase , )
a = True if config.feat_extract_norm == """layer""" else False
a = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__lowerCamelCase , return_attention_mask=__lowerCamelCase , )
a = WavaVecaProcessor(feature_extractor=__lowerCamelCase , tokenizer=__lowerCamelCase )
processor.save_pretrained(__lowerCamelCase )
a = WavaVecaForCTC(__lowerCamelCase )
else:
a = WavaVecaForPreTraining(__lowerCamelCase )
if is_finetuned or is_seq_class:
a , a , a = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
a = argparse.Namespace(task="""audio_pretraining""" )
a = fairseq.tasks.setup_task(__lowerCamelCase )
a , a , a = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=__lowerCamelCase )
a = model[0].eval()
recursively_load_weights(__lowerCamelCase , __lowerCamelCase , not is_finetuned )
hf_wavavec.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
parser.add_argument(
"--is_seq_class",
action="store_true",
help="Whether the model to convert is a fine-tuned sequence classification model or not",
)
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 228 | 0 |
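The conversion script above locates each target parameter by walking a dotted key path with getattr before copying a value into it. The traversal in isolation, on plain objects so it runs without torch:

from types import SimpleNamespace


def set_by_path(root, path: str, value):
    *parents, leaf = path.split(".")
    node = root
    for name in parents:  # descend to the parent of the final attribute
        node = getattr(node, name)
    setattr(node, leaf, value)


model = SimpleNamespace(encoder=SimpleNamespace(layer=SimpleNamespace(weight=None)))
set_by_path(model, "encoder.layer.weight", [1.0, 2.0])
assert model.encoder.layer.weight == [1.0, 2.0]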
"""simple docstring"""
def circle_sort(collection: list) -> list:
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)
        return swapped or left_swap or right_swap

    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)
    return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(circle_sort(unsorted))
| 108 |
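A quick property check for the sort above, assuming the circle_sort function just defined is in scope: on random input its output must agree with Python's built-in sorted().

import random

data = [random.randint(-100, 100) for _ in range(200)]
assert circle_sort(list(data)) == sorted(data)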
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
"configuration_speecht5": [
"SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
"SpeechT5Config",
"SpeechT5HifiGanConfig",
],
"feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
"processing_speecht5": ["SpeechT5Processor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["SpeechT5Tokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speecht5"] = [
"SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
"SpeechT5ForSpeechToText",
"SpeechT5ForSpeechToSpeech",
"SpeechT5ForTextToSpeech",
"SpeechT5Model",
"SpeechT5PreTrainedModel",
"SpeechT5HifiGan",
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 108 | 1 |
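The `if TYPE_CHECKING:` branch above exists so static type checkers see the real import graph while runtime imports stay lazy. The same idiom in a small self-contained module, using the standard library's Decimal as the "heavy" dependency for illustration:

from __future__ import annotations

from typing import TYPE_CHECKING

if TYPE_CHECKING:  # evaluated by static analyzers only, never at runtime
    from decimal import Decimal


def to_float(value: Decimal) -> float:
    # The annotation names Decimal without importing it at module load time;
    # the real import happens lazily, at first call.
    from decimal import Decimal

    if not isinstance(value, Decimal):
        raise TypeError("expected a Decimal")
    return float(value)


from decimal import Decimal as D  # caller-side import for the demo
print(to_float(D("2.5")))  # 2.5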
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : Tuple , A : Optional[Any] , A : Any=2 , A : Union[str, Any]=32 , A : Any=16 , A : Any=3 , A : Optional[Any]=True , A : Optional[Any]=True , A : Any=32 , A : Tuple=4 , A : int=[0, 1, 2, 3] , A : Tuple=4 , A : Union[str, Any]=37 , A : List[Any]="gelu" , A : Any=0.1 , A : Optional[Any]=0.1 , A : Tuple=0.02 , A : int=3 , A : Union[str, Any]=[1, 384, 24, 24] , A : Optional[Any]=True , A : Optional[int]=None , ):
_UpperCAmelCase : Union[str, Any] = parent
_UpperCAmelCase : Union[str, Any] = batch_size
_UpperCAmelCase : int = image_size
_UpperCAmelCase : Tuple = patch_size
_UpperCAmelCase : int = num_channels
_UpperCAmelCase : Union[str, Any] = is_training
_UpperCAmelCase : int = use_labels
_UpperCAmelCase : List[Any] = hidden_size
_UpperCAmelCase : List[str] = num_hidden_layers
_UpperCAmelCase : List[str] = backbone_out_indices
_UpperCAmelCase : List[Any] = num_attention_heads
_UpperCAmelCase : Union[str, Any] = intermediate_size
_UpperCAmelCase : Optional[int] = hidden_act
_UpperCAmelCase : Tuple = hidden_dropout_prob
_UpperCAmelCase : Optional[Any] = attention_probs_dropout_prob
_UpperCAmelCase : Optional[int] = initializer_range
_UpperCAmelCase : int = num_labels
_UpperCAmelCase : Tuple = backbone_featmap_shape
_UpperCAmelCase : List[str] = scope
_UpperCAmelCase : List[str] = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
_UpperCAmelCase : List[Any] = (image_size // patch_size) ** 2
_UpperCAmelCase : Optional[Any] = num_patches + 1
def _A ( self : Union[str, Any] ):
_UpperCAmelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase : str = None
if self.use_labels:
_UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_UpperCAmelCase : Any = self.get_config()
return config, pixel_values, labels
def _A ( self : Union[str, Any] ):
_UpperCAmelCase : Optional[Any] = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [96, 192, 384, 768],
"num_groups": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=A , backbone_featmap_shape=self.backbone_featmap_shape , )
def _A ( self : Any , A : Optional[int] , A : List[Any] , A : Union[str, Any] ):
_UpperCAmelCase : Optional[Any] = DPTModel(config=A )
model.to(A )
model.eval()
_UpperCAmelCase : Tuple = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _A ( self : Dict , A : Union[str, Any] , A : List[Any] , A : int ):
_UpperCAmelCase : Union[str, Any] = self.num_labels
_UpperCAmelCase : int = DPTForDepthEstimation(A )
model.to(A )
model.eval()
_UpperCAmelCase : List[Any] = model(A )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def _A ( self : int , A : List[str] , A : Tuple , A : List[str] ):
_UpperCAmelCase : Dict = self.num_labels
_UpperCAmelCase : List[Any] = DPTForSemanticSegmentation(A )
model.to(A )
model.eval()
_UpperCAmelCase : Union[str, Any] = model(A , labels=A )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def _A ( self : Optional[int] ):
_UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[str] = config_and_inputs
_UpperCAmelCase : Optional[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase_ (snake_case__ , snake_case__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase: List[Any] = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
__UpperCamelCase: Union[str, Any] = (
{
"depth-estimation": DPTForDepthEstimation,
"feature-extraction": DPTModel,
"image-segmentation": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCamelCase: Optional[int] = False
__UpperCamelCase: str = False
__UpperCamelCase: Tuple = False
def _A ( self : List[Any] ):
_UpperCAmelCase : Optional[int] = DPTModelTester(self )
_UpperCAmelCase : Optional[int] = ConfigTester(self , config_class=A , has_text_modality=A , hidden_size=37 )
def _A ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="DPT does not use inputs_embeds" )
def _A ( self : int ):
pass
def _A ( self : List[str] ):
_UpperCAmelCase , _UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : Union[str, Any] = model_class(A )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCAmelCase : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A , nn.Linear ) )
def _A ( self : int ):
_UpperCAmelCase , _UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : Optional[int] = model_class(A )
_UpperCAmelCase : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase : str = [*signature.parameters.keys()]
_UpperCAmelCase : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] , A )
def _A ( self : int ):
_UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def _A ( self : List[Any] ):
_UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*A )
def _A ( self : Tuple ):
_UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*A )
def _A ( self : str ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase : Union[str, Any] = True
if model_class in get_values(A ):
continue
_UpperCAmelCase : int = model_class(A )
model.to(A )
model.train()
_UpperCAmelCase : str = self._prepare_for_class(A , A , return_labels=A )
_UpperCAmelCase : Dict = model(**A ).loss
loss.backward()
def _A ( self : Tuple ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_UpperCAmelCase , _UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase : Dict = False
_UpperCAmelCase : List[Any] = True
if model_class in get_values(A ) or not model_class.supports_gradient_checkpointing:
continue
_UpperCAmelCase : Tuple = model_class(A )
model.to(A )
model.gradient_checkpointing_enable()
model.train()
_UpperCAmelCase : List[Any] = self._prepare_for_class(A , A , return_labels=A )
_UpperCAmelCase : Tuple = model(**A ).loss
loss.backward()
def _A ( self : Optional[int] ):
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase : List[str] = _config_zero_init(A )
for model_class in self.all_model_classes:
_UpperCAmelCase : List[Any] = model_class(config=A )
# Skip the check for the backbone
_UpperCAmelCase : Union[str, Any] = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
_UpperCAmelCase : Union[str, Any] = [F"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _A ( self : Union[str, Any] ):
pass
@slow
def _A ( self : List[Any] ):
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
_UpperCAmelCase : Optional[int] = DPTModel.from_pretrained(A )
self.assertIsNotNone(A )
def _A ( self : Tuple ):
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
_UpperCAmelCase , _UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase : str = "add"
with self.assertRaises(A ):
_UpperCAmelCase : List[str] = DPTForDepthEstimation(A )
def UpperCamelCase_ ( ) -> Any:
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
@slow
class lowerCamelCase_ (unittest.TestCase ):
'''simple docstring'''
def _A ( self : str ):
_UpperCAmelCase : int = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas" )
_UpperCAmelCase : List[str] = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas" ).to(A )
_UpperCAmelCase : List[Any] = prepare_img()
_UpperCAmelCase : int = image_processor(images=A , return_tensors="pt" ).to(A )
# forward pass
with torch.no_grad():
_UpperCAmelCase : Union[str, Any] = model(**A )
_UpperCAmelCase : List[Any] = outputs.predicted_depth
# verify the predicted depth
_UpperCAmelCase : List[str] = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , A )
_UpperCAmelCase : Tuple = torch.tensor(
[[[5.6_437, 5.6_146, 5.6_511], [5.4_371, 5.5_649, 5.5_958], [5.5_215, 5.5_184, 5.5_293]]] ).to(A )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , A , atol=1E-4 ) )
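# A minimal post-processing sketch for a depth map like the one verified above
# (assuming only torch): resize the (batch, H, W) prediction back to the input
# resolution and min-max rescale it to uint8 for visualization. `target_size`
# and the bicubic mode are illustrative choices, not the official DPT recipe.
import torch

def depth_to_uint8(predicted_depth: torch.Tensor, target_size: tuple) -> torch.Tensor:
    # (batch, H, W) -> (batch, 1, H, W) so interpolate sees a channel dim
    resized = torch.nn.functional.interpolate(
        predicted_depth.unsqueeze(1), size=target_size, mode="bicubic", align_corners=False
    ).squeeze(1)
    flat = resized.view(resized.shape[0], -1)
    mins = flat.min(dim=1).values.view(-1, 1, 1)
    maxs = flat.max(dim=1).values.view(-1, 1, 1)
    # per-image min-max normalization to [0, 255]
    return ((resized - mins) / (maxs - mins + 1e-8) * 255).to(torch.uint8)

# e.g. depth_to_uint8(torch.rand(1, 384, 384), (480, 640)).shape -> (1, 480, 640)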
| 31 | '''simple docstring'''
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class lowerCamelCase_ (snake_case__ , snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Optional[Any] = 1
@register_to_config
def __init__( self : Optional[int] , A : int = 1000 , A : Optional[Union[np.ndarray, List[float]]] = None ):
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(A )
# standard deviation of the initial noise distribution
_UpperCAmelCase : int = 1.0
# For now we only support F-PNDM, i.e. the Runge-Kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
_UpperCAmelCase : int = 4
# running values
_UpperCAmelCase : Dict = []
def _A ( self : Optional[int] , A : int , A : Union[str, torch.device] = None ):
_UpperCAmelCase : int = num_inference_steps
_UpperCAmelCase : Union[str, Any] = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
_UpperCAmelCase : Any = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
_UpperCAmelCase : str = torch.tensor(self.config.trained_betas , dtype=torch.float32 )
else:
_UpperCAmelCase : Dict = torch.sin(steps * math.pi / 2 ) ** 2
_UpperCAmelCase : List[Any] = (1.0 - self.betas**2) ** 0.5
_UpperCAmelCase : List[str] = (torch.atan2(self.betas , self.alphas ) / math.pi * 2)[:-1]
_UpperCAmelCase : Dict = timesteps.to(A )
_UpperCAmelCase : Dict = []
def _A ( self : Optional[int] , A : torch.FloatTensor , A : int , A : torch.FloatTensor , A : bool = True , ):
if self.num_inference_steps is None:
raise ValueError(
"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" )
_UpperCAmelCase : Tuple = (self.timesteps == timestep).nonzero().item()
_UpperCAmelCase : Optional[Any] = timestep_index + 1
_UpperCAmelCase : int = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(A )
if len(self.ets ) == 1:
_UpperCAmelCase : List[Any] = self.ets[-1]
elif len(self.ets ) == 2:
_UpperCAmelCase : str = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
_UpperCAmelCase : Tuple = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
_UpperCAmelCase : Union[str, Any] = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
_UpperCAmelCase : Union[str, Any] = self._get_prev_sample(A , A , A , A )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=A )
def _A ( self : Union[str, Any] , A : torch.FloatTensor , *A : Union[str, Any] , **A : Dict ):
return sample
def _A ( self : Optional[Any] , A : Optional[int] , A : int , A : Optional[Any] , A : List[str] ):
_UpperCAmelCase : List[str] = self.alphas[timestep_index]
_UpperCAmelCase : List[Any] = self.betas[timestep_index]
_UpperCAmelCase : Optional[Any] = self.alphas[prev_timestep_index]
_UpperCAmelCase : Dict = self.betas[prev_timestep_index]
_UpperCAmelCase : Tuple = (sample - sigma * ets) / max(A , 1E-8 )
_UpperCAmelCase : List[str] = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self : Union[str, Any] ):
return self.config.num_train_timesteps
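# The step method above is a 4th-order Adams-Bashforth linear multistep update:
# once four model outputs have been buffered in `ets`, they are combined with
# the fixed coefficients (55, -59, 37, -9) / 24. A standalone sketch with
# random tensors standing in for real model outputs:
import torch

ets = [torch.randn(2, 3) for _ in range(4)]  # oldest first, newest last
blended = (55 * ets[-1] - 59 * ets[-2] + 37 * ets[-3] - 9 * ets[-4]) / 24
print(blended.shape)  # torch.Size([2, 3])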
| 31 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class lowercase ( unittest.TestCase ):
def __init__( self , A_ , A_=7 , A_=3 , A_=18 , A_=30 , A_=400 , A_=True , A_=None , A_=True , A_=None , A_=True , A_=[0.5, 0.5, 0.5] , A_=[0.5, 0.5, 0.5] , ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = size if size is not None else {'shortest_edge': 18}
UpperCamelCase = crop_size if crop_size is not None else {'height': 18, 'width': 18}
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = num_channels
UpperCamelCase = image_size
UpperCamelCase = min_resolution
UpperCamelCase = max_resolution
UpperCamelCase = do_resize
UpperCamelCase = size
UpperCamelCase = do_center_crop
UpperCamelCase = crop_size
UpperCamelCase = do_normalize
UpperCamelCase = image_mean
UpperCamelCase = image_std
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class lowercase ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
__lowercase : Tuple = LevitImageProcessor if is_vision_available() else None
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = LevitImageProcessingTester(self )
@property
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , 'image_mean' ) )
self.assertTrue(hasattr(A_ , 'image_std' ) )
self.assertTrue(hasattr(A_ , 'do_normalize' ) )
self.assertTrue(hasattr(A_ , 'do_resize' ) )
self.assertTrue(hasattr(A_ , 'do_center_crop' ) )
self.assertTrue(hasattr(A_ , 'size' ) )
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
pass
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
# Initialize image_processing
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
# Initialize image_processing
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
# Initialize image_processing
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
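# A minimal sketch of the resize -> center-crop -> normalize pipeline these
# tests exercise, written with PIL/numpy only so the expected output shape
# (channels, crop_height, crop_width) is easy to verify. The default values
# mirror the tester above; this is not the real LevitImageProcessor code.
import numpy as np
from PIL import Image

def preprocess(image: Image.Image, shortest_edge=18, crop=18,
               mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) -> np.ndarray:
    w, h = image.size
    scale = shortest_edge / min(w, h)  # resize so the short side hits shortest_edge
    image = image.resize((round(w * scale), round(h * scale)), Image.BILINEAR)
    w, h = image.size
    left, top = (w - crop) // 2, (h - crop) // 2
    image = image.crop((left, top, left + crop, top + crop))
    arr = np.asarray(image, dtype=np.float32) / 255.0  # assumes an RGB input
    arr = (arr - np.array(mean)) / np.array(std)
    return arr.transpose(2, 0, 1)  # HWC -> CHW

# preprocess(Image.new("RGB", (30, 40))).shape -> (3, 18, 18)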
| 110 |
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCAmelCase : Union[str, Any] = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class lowercase ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
__lowercase : Optional[Any] = GPTSwaTokenizer
__lowercase : Optional[Any] = False
__lowercase : Union[str, Any] = True
__lowercase : Tuple = False
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
UpperCamelCase = GPTSwaTokenizer(A_ , eos_token='<unk>' , bos_token='<unk>' , pad_token='<unk>' )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCamelCase ( self , A_ ) -> List[str]:
"""simple docstring"""
UpperCamelCase = 'This is a test'
UpperCamelCase = 'This is a test'
return input_text, output_text
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
UpperCamelCase = '<s>'
UpperCamelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A_ ) , A_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A_ ) , A_ )
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , 'j' )
self.assertEqual(len(A_ ) , 2_000 )
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 2_000 )
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
UpperCamelCase = GPTSwaTokenizer(A_ )
UpperCamelCase = tokenizer.tokenize('This is a test' )
self.assertListEqual(A_ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [465, 287, 265, 631, 842] )
UpperCamelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
# fmt: off
self.assertListEqual(
A_ , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] , )
# fmt: on
UpperCamelCase = tokenizer.convert_tokens_to_ids(A_ )
self.assertListEqual(
A_ , [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
UpperCamelCase = tokenizer.convert_ids_to_tokens(A_ )
# fmt: off
self.assertListEqual(
A_ , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] )
# fmt: on
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = GPTSwaTokenizer(A_ )
UpperCamelCase = ['This is a test', 'I was born in 92000, and this is falsé.']
UpperCamelCase = [
[465, 287, 265, 631, 842],
[262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(A_ , A_ ):
self.assertListEqual(tokenizer.encode_fast(A_ ) , A_ )
# Test that decode_fast returns the input text
for text, token_ids in zip(A_ , A_ ):
self.assertEqual(tokenizer.decode_fast(A_ ) , A_ )
@slow
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = [
'<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')',
'Hey there, how are you doing this fine day?',
'This is a text with a trailing spaces followed by a dot .',
'Häj sväjs lillebrör! =)',
'Det är inget fel på Mr. Cool',
]
# fmt: off
UpperCamelCase = {'input_ids': [[63_423, 5, 6_811, 14_954, 282, 816, 3_821, 63_466, 63_425, 63_462, 18, 63_978, 678, 301, 1_320, 63_423, 63_455, 63_458, 18, 63_982, 4_246, 3_940, 1_901, 47_789, 5_547, 18_994], [19_630, 1_100, 63_446, 1_342, 633, 544, 4_488, 593, 5_102, 2_416, 63_495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_652, 428, 268, 1_936, 515, 268, 58_593, 22_413, 9_106, 546, 268, 33_213, 63_979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55_130, 63_450, 924, 63_449, 2_249, 4_062, 1_558, 318, 63_504, 21_498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2_827, 2_559, 332, 6_575, 63_443, 26_801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A_ , model_name='AI-Sweden/gpt-sw3-126m' , sequences=A_ , )
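# The '<0xC3>', '<0xA9>' tokens checked above are SentencePiece byte fallback:
# characters missing from the vocabulary are emitted as raw UTF-8 byte tokens.
# A toy decoder showing how those bytes reassemble into 'é' (illustrative only,
# not the real tokenizer implementation):
def decode_byte_tokens(tokens):
    out = bytearray()
    for tok in tokens:
        if tok.startswith("<0x") and tok.endswith(">"):
            out.append(int(tok[3:-1], 16))  # '<0xC3>' -> byte 0xC3
        else:
            out.extend(tok.replace("\u2581", " ").encode("utf-8"))
    return out.decode("utf-8")

print(decode_byte_tokens(["\u2581f", "al", "s", "<0xC3>", "<0xA9>", "."]))  # ' falsé.'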
| 110 | 1 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class UpperCamelCase_ :
'''simple docstring'''
def __init__( self : List[Any] , UpperCAmelCase__ : list[tuple[float, float]]) ->List[str]:
'''simple docstring'''
A__ = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
A__ = len(UpperCAmelCase__) - 1
def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : float) ->list[float]:
'''simple docstring'''
assert 0 <= t <= 1, "Time t must be between 0 and 1."
A__ = []
for i in range(len(self.list_of_points)):
# basis function for each i
output_values.append(
comb(self.degree , UpperCAmelCase__) * ((1 - t) ** (self.degree - i)) * (t**i))
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(UpperCAmelCase__) , 5) == 1
return output_values
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : float) ->tuple[float, float]:
'''simple docstring'''
assert 0 <= t <= 1, "Time t must be between 0 and 1."
A__ = self.basis_function(UpperCAmelCase__)
A__ = 0.0
A__ = 0.0
for i in range(len(self.list_of_points)):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : float = 0.01) ->int:
'''simple docstring'''
from matplotlib import pyplot as plt # type: ignore
A__ = [] # x coordinates of points to plot
A__ = [] # y coordinates of points to plot
A__ = 0.0
while t <= 1:
A__ = self.bezier_curve_function(UpperCAmelCase__)
to_plot_x.append(value[0])
to_plot_y.append(value[1])
t += step_size
A__ = [i[0] for i in self.list_of_points]
A__ = [i[1] for i in self.list_of_points]
plt.plot(
UpperCAmelCase__ , UpperCAmelCase__ , color='''blue''' , label='''Curve of Degree ''' + str(self.degree) , )
plt.scatter(UpperCAmelCase__ , UpperCAmelCase__ , color='''red''' , label='''Control Points''')
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
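# A quick cross-check of the basis-function evaluation above: the same point
# can be computed with De Casteljau's algorithm (repeated linear interpolation
# between control points), which is numerically stabler for high degrees.
def de_casteljau(points, t):
    pts = list(points)
    while len(pts) > 1:
        pts = [((1 - t) * xa + t * xb, (1 - t) * ya + t * yb)
               for (xa, ya), (xb, yb) in zip(pts, pts[1:])]
    return pts[0]

print(de_casteljau([(0, 0), (5, 5), (5, 0)], 0.5))  # (3.75, 2.5)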
| 14 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__A = logging.get_logger(__name__)
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = ['''input_features''', '''is_longer''']
def __init__( self , lowerCamelCase__=64 , lowerCamelCase__=48_000 , lowerCamelCase__=480 , lowerCamelCase__=10 , lowerCamelCase__=1_024 , lowerCamelCase__=0.0 , lowerCamelCase__=False , lowerCamelCase__ = 0 , lowerCamelCase__ = 14_000 , lowerCamelCase__ = None , lowerCamelCase__ = "fusion" , lowerCamelCase__ = "repeatpad" , **lowerCamelCase__ , ) -> Tuple:
'''simple docstring'''
super().__init__(
feature_size=lowerCamelCase__ , sampling_rate=lowerCamelCase__ , padding_value=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , **lowerCamelCase__ , )
__lowerCamelCase = top_db
__lowerCamelCase = truncation
__lowerCamelCase = padding
__lowerCamelCase = fft_window_size
__lowerCamelCase = (fft_window_size >> 1) + 1
__lowerCamelCase = hop_length
__lowerCamelCase = max_length_s
__lowerCamelCase = max_length_s * sampling_rate
__lowerCamelCase = sampling_rate
__lowerCamelCase = frequency_min
__lowerCamelCase = frequency_max
__lowerCamelCase = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=lowerCamelCase__ , min_frequency=lowerCamelCase__ , max_frequency=lowerCamelCase__ , sampling_rate=lowerCamelCase__ , norm=lowerCamelCase__ , mel_scale='htk' , )
__lowerCamelCase = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=lowerCamelCase__ , min_frequency=lowerCamelCase__ , max_frequency=lowerCamelCase__ , sampling_rate=lowerCamelCase__ , norm='slaney' , mel_scale='slaney' , )
def lowercase_ ( self ) -> Dict[str, Any]:
'''simple docstring'''
__lowerCamelCase = copy.deepcopy(self.__dict__ )
__lowerCamelCase = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> np.ndarray:
'''simple docstring'''
__lowerCamelCase = spectrogram(
lowerCamelCase__ , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=lowerCamelCase__ , log_mel='dB' , )
return log_mel_spectrogram.T
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
'''simple docstring'''
__lowerCamelCase = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
__lowerCamelCase = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
__lowerCamelCase = [0]
# randomly choose index for each part
__lowerCamelCase = np.random.choice(ranges[0] )
__lowerCamelCase = np.random.choice(ranges[1] )
__lowerCamelCase = np.random.choice(ranges[2] )
__lowerCamelCase = mel[idx_front : idx_front + chunk_frames, :]
__lowerCamelCase = mel[idx_middle : idx_middle + chunk_frames, :]
__lowerCamelCase = mel[idx_back : idx_back + chunk_frames, :]
__lowerCamelCase = torch.tensor(mel[None, None, :] )
__lowerCamelCase = torch.nn.functional.interpolate(
lowerCamelCase__ , size=[chunk_frames, 64] , mode='bilinear' , align_corners=lowerCamelCase__ )
__lowerCamelCase = mel_shrink[0][0].numpy()
__lowerCamelCase = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> np.array:
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
__lowerCamelCase = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
__lowerCamelCase = len(lowerCamelCase__ ) - max_length
__lowerCamelCase = np.random.randint(0 , overflow + 1 )
__lowerCamelCase = waveform[idx : idx + max_length]
__lowerCamelCase = self._np_extract_fbank_features(lowerCamelCase__ , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
__lowerCamelCase = self._np_extract_fbank_features(lowerCamelCase__ , self.mel_filters )
__lowerCamelCase = max_length // self.hop_length + 1 # the +1 relates to how the spectrogram is computed
__lowerCamelCase = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
__lowerCamelCase = np.stack([mel, mel, mel, mel] , axis=0 )
__lowerCamelCase = False
else:
__lowerCamelCase = self._random_mel_fusion(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = True
else:
raise NotImplementedError(f"""data_truncating {truncation} not implemented""" )
else:
__lowerCamelCase = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
__lowerCamelCase = int(max_length / len(lowerCamelCase__ ) )
__lowerCamelCase = np.stack(np.tile(lowerCamelCase__ , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
__lowerCamelCase = int(max_length / len(lowerCamelCase__ ) )
__lowerCamelCase = np.stack(np.tile(lowerCamelCase__ , lowerCamelCase__ ) )
__lowerCamelCase = np.pad(lowerCamelCase__ , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 )
if truncation == "fusion":
__lowerCamelCase = self._np_extract_fbank_features(lowerCamelCase__ , self.mel_filters )
__lowerCamelCase = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
__lowerCamelCase = self._np_extract_fbank_features(lowerCamelCase__ , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> BatchFeature:
'''simple docstring'''
__lowerCamelCase = truncation if truncation is not None else self.truncation
__lowerCamelCase = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
__lowerCamelCase = isinstance(lowerCamelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
__lowerCamelCase = is_batched_numpy or (
isinstance(lowerCamelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__lowerCamelCase = [np.asarray(lowerCamelCase__ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCamelCase__ , np.ndarray ):
__lowerCamelCase = np.asarray(lowerCamelCase__ , dtype=np.floataa )
elif isinstance(lowerCamelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__lowerCamelCase = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__lowerCamelCase = [np.asarray(lowerCamelCase__ )]
# convert to mel spectrogram, truncate and pad if needed.
__lowerCamelCase = [
self._get_input_mel(lowerCamelCase__ , max_length if max_length else self.nb_max_samples , lowerCamelCase__ , lowerCamelCase__ )
for waveform in raw_speech
]
__lowerCamelCase = []
__lowerCamelCase = []
for mel, longer in padded_inputs:
input_mel.append(lowerCamelCase__ )
is_longer.append(lowerCamelCase__ )
if truncation == "fusion" and sum(lowerCamelCase__ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
__lowerCamelCase = np.random.randint(0 , len(lowerCamelCase__ ) )
__lowerCamelCase = True
if isinstance(input_mel[0] , lowerCamelCase__ ):
__lowerCamelCase = [np.asarray(lowerCamelCase__ , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
__lowerCamelCase = [[longer] for longer in is_longer]
__lowerCamelCase = {'input_features': input_mel, 'is_longer': is_longer}
__lowerCamelCase = BatchFeature(lowerCamelCase__ )
if return_tensors is not None:
__lowerCamelCase = input_features.convert_to_tensors(lowerCamelCase__ )
return input_features
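# A minimal numpy sketch of the "repeatpad" branch handled above: a waveform
# shorter than max_length is tiled a whole number of times, then zero-padded to
# the target length. Illustrative only; the real extractor also builds mel features.
import numpy as np

def repeatpad(waveform: np.ndarray, max_length: int) -> np.ndarray:
    n_repeat = max(int(max_length / len(waveform)), 1)
    tiled = np.tile(waveform, n_repeat)
    return np.pad(tiled, (0, max_length - len(tiled)), mode="constant")

print(repeatpad(np.array([1.0, 2.0, 3.0]), 8))  # [1. 2. 3. 1. 2. 3. 0. 0.]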
| 90 | 0 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : int = ''
_lowerCamelCase : Tuple = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self : int , UpperCAmelCase : Optional[DatasetInfo] = None , UpperCAmelCase : Optional[str] = None , **UpperCAmelCase : Union[str, Any] , ):
super().__init__(self , **UpperCAmelCase )
A_ = repo_info
A_ = token
A_ = None
def __A ( self : Optional[Any] ):
if self.dir_cache is None:
A_ = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
A_ = {
"name": hf_file.rfilename,
"size": None,
"type": "file",
}
self.dir_cache.update(
{
str(UpperCAmelCase ): {"name": str(UpperCAmelCase ), "size": None, "type": "directory"}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def __A ( self : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : str = "rb" , **UpperCAmelCase : str , ):
if not isinstance(self.repo_info , UpperCAmelCase ):
raise NotImplementedError(f'''Open is only implemented for dataset repositories, but got {self.repo_info}''' )
A_ = hf_hub_url(self.repo_info.id , UpperCAmelCase , revision=self.repo_info.sha )
return fsspec.open(
UpperCAmelCase , mode=UpperCAmelCase , headers=get_authentication_headers_for_url(UpperCAmelCase , use_auth_token=self.token ) , client_kwargs={"trust_env": True} , ).open()
def __A ( self : Tuple , UpperCAmelCase : Optional[Any] , **UpperCAmelCase : List[Any] ):
self._get_dirs()
A_ = self._strip_protocol(UpperCAmelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(UpperCAmelCase )
def __A ( self : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : int=False , **UpperCAmelCase : Optional[Any] ):
self._get_dirs()
A_ = PurePosixPath(path.strip("/" ) )
A_ = {}
for p, f in self.dir_cache.items():
A_ = PurePosixPath(p.strip("/" ) )
A_ = p.parent
if root == path:
A_ = f
A_ = list(paths.values() )
if detail:
return out
else:
return sorted(f["name"] for f in out ) | 329 |
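# A small sketch of the directory-cache trick used in _get_dirs above: every
# parent of each file path is registered as a directory entry, so listing works
# without any real filesystem calls. The file names here are made up.
from pathlib import PurePosixPath

files = ["data/train/part-0.parquet", "data/README.md"]
cache = {f: {"name": f, "type": "file"} for f in files}
for f in files:
    for d in list(PurePosixPath(f).parents)[:-1]:  # [:-1] drops the '.' root
        cache[str(d)] = {"name": str(d), "type": "directory"}
print(sorted(cache))
# ['data', 'data/README.md', 'data/train', 'data/train/part-0.parquet']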
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a :Dict = logging.get_logger(__name__)
__a :int = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
'https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : List[Any] = 'realm'
def __init__( self : Union[str, Any] , UpperCAmelCase : Optional[Any]=30522 , UpperCAmelCase : List[str]=768 , UpperCAmelCase : Optional[Any]=128 , UpperCAmelCase : str=12 , UpperCAmelCase : Dict=12 , UpperCAmelCase : Optional[Any]=8 , UpperCAmelCase : Any=3072 , UpperCAmelCase : Union[str, Any]="gelu_new" , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : Dict=0.1 , UpperCAmelCase : int=512 , UpperCAmelCase : Tuple=2 , UpperCAmelCase : Union[str, Any]=0.02 , UpperCAmelCase : Union[str, Any]=1E-12 , UpperCAmelCase : List[Any]=256 , UpperCAmelCase : Optional[int]=10 , UpperCAmelCase : List[str]=1E-3 , UpperCAmelCase : Any=5 , UpperCAmelCase : List[Any]=320 , UpperCAmelCase : Optional[Any]=13353718 , UpperCAmelCase : Tuple=5000 , UpperCAmelCase : List[str]=1 , UpperCAmelCase : Union[str, Any]=0 , UpperCAmelCase : Union[str, Any]=2 , **UpperCAmelCase : List[str] , ):
super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase )
# Common config
A_ = vocab_size
A_ = max_position_embeddings
A_ = hidden_size
A_ = retriever_proj_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = num_candidates
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = initializer_range
A_ = type_vocab_size
A_ = layer_norm_eps
# Reader config
A_ = span_hidden_size
A_ = max_span_width
A_ = reader_layer_norm_eps
A_ = reader_beam_size
A_ = reader_seq_len
# Retrieval config
A_ = num_block_records
A_ = searcher_beam_size | 329 | 1 |
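# The config class above follows the usual pattern: constructor keywords become
# attributes that round-trip through a plain dict. A dependency-free toy
# version of that contract (not the real PretrainedConfig API):
class ToyConfig:
    def __init__(self, vocab_size=30522, hidden_size=768, **kwargs):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        for key, value in kwargs.items():
            setattr(self, key, value)

    def to_dict(self):
        return dict(self.__dict__)

cfg = ToyConfig(num_candidates=8)
assert ToyConfig(**cfg.to_dict()).num_candidates == 8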
'''simple docstring'''
import math
import tensorflow as tf
from packaging import version
def __lowerCamelCase ( A__ ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = tf.convert_to_tensor(A__ )
UpperCamelCase = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
return x * cdf
def __lowerCamelCase ( A__ ) -> Any:
"""simple docstring"""
UpperCamelCase = tf.convert_to_tensor(A__ )
UpperCamelCase = tf.cast(math.pi , x.dtype )
UpperCamelCase = tf.cast(0.044_715 , x.dtype )
UpperCamelCase = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(A__ , 3 )) ))
return x * cdf
def __lowerCamelCase ( A__ ) -> str:
"""simple docstring"""
UpperCamelCase = tf.convert_to_tensor(A__ )
return x * tf.tanh(tf.math.softplus(A__ ) )
def __lowerCamelCase ( A__ ) -> List[str]:
"""simple docstring"""
UpperCamelCase = tf.convert_to_tensor(A__ )
UpperCamelCase = tf.cast(0.044_715 , x.dtype )
UpperCamelCase = tf.cast(0.7_978_845_608 , x.dtype )
return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) ))
def __lowerCamelCase ( A__ ) -> List[str]:
"""simple docstring"""
UpperCamelCase = tf.convert_to_tensor(A__ )
UpperCamelCase = tf.cast(1.702 , x.dtype )
return x * tf.math.sigmoid(coeff * x )
def __lowerCamelCase ( A__ ) -> Optional[int]:
"""simple docstring"""
return tf.clip_by_value(_gelu(A__ ) , -10 , 10 )
def __lowerCamelCase ( A__ , A__=-1 ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = tf.split(A__ , 2 , axis=A__ )
return a * tf.math.sigmoid(A__ )
if version.parse(tf.version.VERSION) >= version.parse("2.4"):
def __lowerCamelCase ( A__ ) -> str:
"""simple docstring"""
return tf.keras.activations.gelu(A__ , approximate=A__ )
_lowerCamelCase : Union[str, Any] = tf.keras.activations.gelu
_lowerCamelCase : Dict = approximate_gelu_wrap
else:
_lowerCamelCase : Tuple = _gelu
_lowerCamelCase : Optional[Any] = _gelu_new
_lowerCamelCase : str = {
"gelu": gelu,
"gelu_10": gelu_aa,
"gelu_fast": gelu_fast,
"gelu_new": gelu_new,
"glu": glu,
"mish": mish,
"quick_gelu": quick_gelu,
"relu": tf.keras.activations.relu,
"sigmoid": tf.keras.activations.sigmoid,
"silu": tf.keras.activations.swish,
"swish": tf.keras.activations.swish,
"tanh": tf.keras.activations.tanh,
}
def __lowerCamelCase ( A__ ) -> int:
"""simple docstring"""
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(F"""function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}""" )
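# A quick numerical check (assuming TensorFlow is installed) that the exact
# erf-based GELU and the tanh approximation defined above stay close, which is
# why both variants can be exposed side by side in the ACT2FN mapping:
import math
import tensorflow as tf

x = tf.linspace(-3.0, 3.0, 7)  # float32
exact = 0.5 * x * (1.0 + tf.math.erf(x / math.sqrt(2.0)))
approx = 0.5 * x * (1.0 + tf.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * tf.pow(x, 3))))
print(float(tf.reduce_max(tf.abs(exact - approx))))  # small, roughly 1e-3 or below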
| 28 |
'''simple docstring'''
import math
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCamelCase__ : Optional[Any]=0 ): # a graph with Node 0,1,...,N-1
"""simple docstring"""
UpperCamelCase = n
UpperCamelCase = [
[math.inf for j in range(0 , UpperCamelCase__ )] for i in range(0 , UpperCamelCase__ )
] # adjacency matrix for weight
UpperCamelCase = [
[math.inf for j in range(0 , UpperCamelCase__ )] for i in range(0 , UpperCamelCase__ )
] # dp[i][j] stores minimum distance from i to j
def A ( self : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple ):
"""simple docstring"""
UpperCamelCase = w
def A ( self : str ):
"""simple docstring"""
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
UpperCamelCase = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def A ( self : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : List[Any] ):
"""simple docstring"""
return self.dp[u][v]
if __name__ == "__main__":
_lowerCamelCase : List[str] = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
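# A tiny sanity check of the Floyd-Warshall recurrence used above, on a 3-node
# graph: the direct edge 0 -> 2 costs 10, but routing through node 1 costs
# 3 + 4 = 7, and the triple loop finds the cheaper path.
import math

INF = math.inf
dp = [[0, 3, 10], [INF, 0, 4], [INF, INF, 0]]
for k in range(3):
    for i in range(3):
        for j in range(3):
            dp[i][j] = min(dp[i][j], dp[i][k] + dp[k][j])
print(dp[0][2])  # 7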
| 28 | 1 |
def lowerCamelCase__ ( a__ : str , a__ : str ) -> bool:
UpperCamelCase_ = len(a__ ) + 1
UpperCamelCase_ = len(a__ ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether prefix string of
# length i of input_string matches with prefix string of length j of
# given pattern.
# "dp" stands for dynamic programming.
UpperCamelCase_ = [[0 for i in range(a__ )] for j in range(a__ )]
# since string of zero length match pattern of zero length
UpperCamelCase_ = 1
# since pattern of zero length will never match with string of non-zero length
for i in range(1 , a__ ):
UpperCamelCase_ = 0
# since string of zero length will match with pattern where there
# is at least one * alternatively
for j in range(1 , a__ ):
UpperCamelCase_ = dp[0][j - 2] if pattern[j - 1] == """*""" else 0
# now using bottom-up approach to find for all remaining lengths
for i in range(1 , a__ ):
for j in range(1 , a__ ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
UpperCamelCase_ = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
UpperCamelCase_ = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
UpperCamelCase_ = dp[i - 1][j]
else:
UpperCamelCase_ = 0
else:
UpperCamelCase_ = 0
return bool(dp[-1][-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
_A = '''aab'''
_A = '''c*a*b'''
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F'''{input_string} matches the given pattern {pattern}''')
else:
print(F'''{input_string} does not match with the given pattern {pattern}''')
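# An equivalent top-down formulation of the same recurrence, handy for
# cross-checking the table: '*' either drops the preceding element (j + 2) or
# consumes one matching character (i + 1). Memoized with functools.lru_cache.
from functools import lru_cache

def match(s: str, p: str) -> bool:
    @lru_cache(maxsize=None)
    def go(i: int, j: int) -> bool:
        if j == len(p):
            return i == len(s)
        first = i < len(s) and p[j] in (s[i], ".")
        if j + 1 < len(p) and p[j + 1] == "*":
            return go(i, j + 2) or (first and go(i + 1, j))
        return first and go(i + 1, j + 1)
    return go(0, 0)

assert match("aab", "c*a*b")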
| 261 |
import copy
import re
class lowercase_ :
A__ : Optional[Any] = """hp"""
A__ : Union[str, Any] = {}
A__ : Optional[int] = None
@classmethod
def lowerCamelCase_ ( cls , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ = prefix
UpperCamelCase_ = defaults
cls.build_naming_info()
@staticmethod
def lowerCamelCase_ ( __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
if len(__UpperCamelCase ) == 0:
return ""
UpperCamelCase_ = None
if any(char.isdigit() for char in word ):
raise Exception(f'''Parameters should not contain numbers: \'{word}\' contains a number''' )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(__UpperCamelCase ) + 1 ):
UpperCamelCase_ = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
UpperCamelCase_ = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(__UpperCamelCase ):
UpperCamelCase_ = """"""
while integer != 0:
UpperCamelCase_ = chr(ord("""A""" ) + integer % 1_0 ) + s
integer //= 1_0
return s
UpperCamelCase_ = 0
while True:
UpperCamelCase_ = word + """#""" + int_to_alphabetic(__UpperCamelCase )
if sword in info["reverse_short_word"]:
continue
else:
UpperCamelCase_ = sword
break
UpperCamelCase_ = short_word
UpperCamelCase_ = word
return short_word
@staticmethod
def lowerCamelCase_ ( __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ = param_name.split("""_""" )
UpperCamelCase_ = [TrialShortNamer.shortname_for_word(__UpperCamelCase , __UpperCamelCase ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fall back
# to a separated short name
UpperCamelCase_ = ["""""", """_"""]
for separator in separators:
UpperCamelCase_ = separator.join(__UpperCamelCase )
if shortname not in info["reverse_short_param"]:
UpperCamelCase_ = shortname
UpperCamelCase_ = param_name
return shortname
return param_name
@staticmethod
def lowerCamelCase_ ( __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ = TrialShortNamer.shortname_for_key(__UpperCamelCase , __UpperCamelCase )
UpperCamelCase_ = short_name
UpperCamelCase_ = param_name
@classmethod
def lowerCamelCase_ ( cls ):
"""simple docstring"""
if cls.NAMING_INFO is not None:
return
UpperCamelCase_ = {
"""short_word""": {},
"""reverse_short_word""": {},
"""short_param""": {},
"""reverse_short_param""": {},
}
UpperCamelCase_ = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(__UpperCamelCase , __UpperCamelCase )
UpperCamelCase_ = info
@classmethod
def lowerCamelCase_ ( cls , __UpperCamelCase ):
"""simple docstring"""
cls.build_naming_info()
assert cls.PREFIX is not None
UpperCamelCase_ = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(f'''You should provide a default value for the param name {k} with value {v}''' )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
UpperCamelCase_ = cls.NAMING_INFO["""short_param"""][k]
if isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCamelCase_ = 1 if v else 0
UpperCamelCase_ = """""" if isinstance(__UpperCamelCase , (int, float) ) else """-"""
UpperCamelCase_ = f'''{key}{sep}{v}'''
name.append(__UpperCamelCase )
return "_".join(__UpperCamelCase )
@classmethod
def lowerCamelCase_ ( cls , __UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
UpperCamelCase_ = []
else:
UpperCamelCase_ = repr.split("""_""" )
UpperCamelCase_ = {}
for value in values:
if "-" in value:
UpperCamelCase_ , UpperCamelCase_ = value.split("""-""" )
else:
UpperCamelCase_ = re.sub("""[0-9.]""" , """""" , __UpperCamelCase )
UpperCamelCase_ = float(re.sub("""[^0-9.]""" , """""" , __UpperCamelCase ) )
UpperCamelCase_ = cls.NAMING_INFO["""reverse_short_param"""][p_k]
UpperCamelCase_ = p_v
for k in cls.DEFAULTS:
if k not in parameters:
UpperCamelCase_ = cls.DEFAULTS[k]
return parameters
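# A minimal sketch of the prefix-shortening idea behind shortname_for_word
# above: walk prefixes of increasing length and claim the first one no other
# word has taken yet. Toy reimplementation, not the class above.
def short_words(words):
    taken, out = {}, {}
    for word in words:
        for k in range(1, len(word) + 1):
            if word[:k] not in taken:
                taken[word[:k]] = word
                out[word] = word[:k]
                break
    return out

print(short_words(["learning", "layers", "lambda"]))
# {'learning': 'l', 'layers': 'la', 'lambda': 'lam'}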
| 261 | 1 |
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=True , UpperCamelCase__="pt" ) -> List[Any]:
__lowerCamelCase = {'''add_prefix_space''': True} if isinstance(UpperCamelCase__ , UpperCamelCase__ ) and not line.startswith(''' ''' ) else {}
__lowerCamelCase = padding_side
return tokenizer(
[line] , max_length=UpperCamelCase__ , padding='''max_length''' if pad_to_max_length else None , truncation=UpperCamelCase__ , return_tensors=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , **UpperCamelCase__ , )
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , ) -> Tuple:
__lowerCamelCase = input_ids.ne(UpperCamelCase__ ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class a__ ( UpperCAmelCase__ ):
def __init__( self : Dict , a : Optional[Any] , a : int , a : Optional[int] , a : Union[str, Any] , a : Optional[int]="train" , a : List[str]=None , a : List[str]=None , a : List[str]=None , a : Dict="" , ):
"""simple docstring"""
super().__init__()
__lowerCamelCase = Path(a ).joinpath(type_path + '''.source''' )
__lowerCamelCase = Path(a ).joinpath(type_path + '''.target''' )
__lowerCamelCase = self.get_char_lens(self.src_file )
__lowerCamelCase = max_source_length
__lowerCamelCase = max_target_length
assert min(self.src_lens ) > 0, f"""found empty line in {self.src_file}"""
__lowerCamelCase = tokenizer
__lowerCamelCase = prefix
if n_obs is not None:
__lowerCamelCase = self.src_lens[:n_obs]
__lowerCamelCase = src_lang
__lowerCamelCase = tgt_lang
def __len__( self : Dict ):
"""simple docstring"""
return len(self.src_lens )
def __getitem__( self : int , a : Any ):
"""simple docstring"""
__lowerCamelCase = index + 1 # linecache starts at 1
__lowerCamelCase = self.prefix + linecache.getline(str(self.src_file ) , a ).rstrip('''\n''' )
__lowerCamelCase = linecache.getline(str(self.tgt_file ) , a ).rstrip('''\n''' )
assert source_line, f"""empty source line for index {index}"""
assert tgt_line, f"""empty tgt line for index {index}"""
# Need to add eos token manually for T5
if isinstance(self.tokenizer , a ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
__lowerCamelCase = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , a ) else self.tokenizer
)
__lowerCamelCase = self.tokenizer.generator if isinstance(self.tokenizer , a ) else self.tokenizer
__lowerCamelCase = encode_line(a , a , self.max_source_length , '''right''' )
__lowerCamelCase = encode_line(a , a , self.max_target_length , '''right''' )
__lowerCamelCase = source_inputs['''input_ids'''].squeeze()
__lowerCamelCase = target_inputs['''input_ids'''].squeeze()
__lowerCamelCase = source_inputs['''attention_mask'''].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def SCREAMING_SNAKE_CASE__ ( a : Any ):
"""simple docstring"""
return [len(a ) for x in Path(a ).open().readlines()]
def SCREAMING_SNAKE_CASE__ ( self : Tuple , a : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = torch.stack([x['''input_ids'''] for x in batch] )
__lowerCamelCase = torch.stack([x['''attention_mask'''] for x in batch] )
__lowerCamelCase = torch.stack([x['''decoder_input_ids'''] for x in batch] )
__lowerCamelCase = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , a )
else self.tokenizer.pad_token_id
)
__lowerCamelCase = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , a )
else self.tokenizer.pad_token_id
)
__lowerCamelCase = trim_batch(a , a )
__lowerCamelCase , __lowerCamelCase = trim_batch(a , a , attention_mask=a )
__lowerCamelCase = {
'''input_ids''': source_ids,
'''attention_mask''': source_mask,
'''decoder_input_ids''': y,
}
return batch
__UpperCAmelCase =getLogger(__name__)
def __lowerCAmelCase ( UpperCamelCase__ ) -> Dict:
return list(itertools.chain.from_iterable(UpperCamelCase__ ) )
def __lowerCAmelCase ( UpperCamelCase__ ) -> None:
__lowerCamelCase = get_git_info()
save_json(UpperCamelCase__ , os.path.join(UpperCamelCase__ , '''git_log.json''' ) )
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=4 , **UpperCamelCase__ ) -> Tuple:
with open(UpperCamelCase__ , '''w''' ) as f:
json.dump(UpperCamelCase__ , UpperCamelCase__ , indent=UpperCamelCase__ , **UpperCamelCase__ )
def __lowerCAmelCase ( UpperCamelCase__ ) -> Optional[Any]:
with open(UpperCamelCase__ ) as f:
return json.load(UpperCamelCase__ )
def __lowerCAmelCase ( ) -> Union[str, Any]:
__lowerCamelCase = git.Repo(search_parent_directories=UpperCamelCase__ )
__lowerCamelCase = {
'''repo_id''': str(UpperCamelCase__ ),
'''repo_sha''': str(repo.head.object.hexsha ),
'''repo_branch''': str(repo.active_branch ),
'''hostname''': str(socket.gethostname() ),
}
return repo_infos
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> List:
return list(map(UpperCamelCase__ , UpperCamelCase__ ) )
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
with open(UpperCamelCase__ , '''wb''' ) as f:
return pickle.dump(UpperCamelCase__ , UpperCamelCase__ )
def __lowerCAmelCase ( UpperCamelCase__ ) -> Dict:
def remove_articles(UpperCamelCase__ ):
return re.sub(r'''\b(a|an|the)\b''' , ''' ''' , UpperCamelCase__ )
def white_space_fix(UpperCamelCase__ ):
return " ".join(text.split() )
def remove_punc(UpperCamelCase__ ):
__lowerCamelCase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(UpperCamelCase__ ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(UpperCamelCase__ ) ) ) )
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> str:
__lowerCamelCase = normalize_answer(UpperCamelCase__ ).split()
__lowerCamelCase = normalize_answer(UpperCamelCase__ ).split()
__lowerCamelCase = Counter(UpperCamelCase__ ) & Counter(UpperCamelCase__ )
__lowerCamelCase = sum(common.values() )
if num_same == 0:
return 0
__lowerCamelCase = 1.0 * num_same / len(UpperCamelCase__ )
__lowerCamelCase = 1.0 * num_same / len(UpperCamelCase__ )
__lowerCamelCase = (2 * precision * recall) / (precision + recall)
return fa
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> Optional[Any]:
return normalize_answer(UpperCamelCase__ ) == normalize_answer(UpperCamelCase__ )
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
assert len(UpperCamelCase__ ) == len(UpperCamelCase__ )
__lowerCamelCase = 0
for hypo, pred in zip(UpperCamelCase__ , UpperCamelCase__ ):
em += exact_match_score(UpperCamelCase__ , UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
em /= len(UpperCamelCase__ )
return {"em": em}
def __lowerCAmelCase ( UpperCamelCase__ ) -> Tuple:
return model_prefix.startswith('''rag''' )
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]:
__lowerCamelCase = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
__lowerCamelCase = '''dropout_rate'''
for p in extra_params:
if getattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
if not hasattr(UpperCamelCase__ , UpperCamelCase__ ) and not hasattr(UpperCamelCase__ , equivalent_param[p] ):
logger.info('''config doesn\'t have a `{}` attribute'''.format(UpperCamelCase__ ) )
delattr(UpperCamelCase__ , UpperCamelCase__ )
continue
__lowerCamelCase = p if hasattr(UpperCamelCase__ , UpperCamelCase__ ) else equivalent_param[p]
setattr(UpperCamelCase__ , UpperCamelCase__ , getattr(UpperCamelCase__ , UpperCamelCase__ ) )
delattr(UpperCamelCase__ , UpperCamelCase__ )
return hparams, config
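# A worked example of the token-level F1 computed above: with prediction
# "a cat sat" and reference "the cat sat down", two tokens overlap, so
# precision = 2/3, recall = 2/4, and F1 = 2PR/(P+R) ≈ 0.571.
from collections import Counter

pred, gold = "a cat sat".split(), "the cat sat down".split()
num_same = sum((Counter(pred) & Counter(gold)).values())
precision, recall = num_same / len(pred), num_same / len(gold)
print(2 * precision * recall / (precision + recall))  # 0.5714...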
| 67 |
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase : Dict = logging.get_logger(__name__)
def a__ ( snake_case__ ) -> Dict:
lowerCamelCase = torch.load(snake_case__ , map_location="""cpu""" )
if "model" in sd.keys():
lowerCamelCase = torch.load(snake_case__ , map_location="""cpu""" )["""model"""]
# pop unnecessary weights
lowerCamelCase = [
"""decoder.version""",
"""decoder.output_projection.weight""",
]
for key in keys_to_delete:
if key in sd:
sd.pop(snake_case__ )
lowerCamelCase = {
"""decoder.project_in_dim.weight""": """decoder.project_in.weight""",
"""decoder.project_out_dim.weight""": """decoder.project_out.weight""",
"""decoder.layer_norm.weight""": """decoder.final_layer_norm.weight""",
"""decoder.layer_norm.bias""": """decoder.final_layer_norm.bias""",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
lowerCamelCase = sd.pop(snake_case__ )
lowerCamelCase = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
lowerCamelCase = sd[key]
# We split QKV in separate Q,K,V
lowerCamelCase = key.replace(""".qkv_proj.""" , """.q_proj.""" )
lowerCamelCase = key.replace(""".qkv_proj.""" , """.k_proj.""" )
lowerCamelCase = key.replace(""".qkv_proj.""" , """.v_proj.""" )
lowerCamelCase = value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` keeps its QKV weight separated in K,V,Q order despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
lowerCamelCase , lowerCamelCase , lowerCamelCase = torch.split(snake_case__ , depth // 3 , dim=0 )
lowerCamelCase = q
lowerCamelCase = k
lowerCamelCase = v
del sd[key]
return sd
@torch.no_grad()
def a__ ( snake_case__ , snake_case__ , snake_case__=None ) -> Tuple:
lowerCamelCase = load_checkpoint(snake_case__ )
if config is not None:
lowerCamelCase = OPTConfig.from_pretrained(snake_case__ )
else:
lowerCamelCase = OPTConfig()
lowerCamelCase = OPTModel(snake_case__ ).half().eval()
model.load_state_dict(snake_case__ )
# Check results
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
model.save_pretrained(snake_case__ )
if __name__ == "__main__":
lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fairseq_path""",
type=str,
help=(
"""path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"""
""" https://huggingface.co/models?other=opt_metasq"""
),
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--hf_config""", default=None, type=str, help="""Define HF config.""")
lowerCAmelCase : Optional[Any] = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
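# A standalone sketch of the fused-QKV split performed in load_checkpoint
# above: one (3*d, d) projection is cut into three (d, d) blocks along dim 0.
# Random weights stand in for a real checkpoint tensor.
import torch

d = 4
qkv = torch.randn(3 * d, d)
q, k, v = torch.split(qkv, d, dim=0)
assert q.shape == k.shape == v.shape == (d, d)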
| 291 | 0 |
from ..utils import DummyObject, requires_backends
class __snake_case ( metaclass=lowerCAmelCase ):
_a : Optional[Any]= ["transformers", "torch", "note_seq"]
def __init__( self ,*snake_case ,**snake_case ):
'''simple docstring'''
requires_backends(self ,["""transformers""", """torch""", """note_seq"""] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls ,*snake_case ,**snake_case ):
'''simple docstring'''
requires_backends(cls ,["""transformers""", """torch""", """note_seq"""] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls ,*snake_case ,**snake_case ):
'''simple docstring'''
requires_backends(cls ,["""transformers""", """torch""", """note_seq"""] )
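# The class above is the "dummy object" pattern: when optional backends are
# missing, the real class is swapped for a stand-in that raises a clear error
# on first use instead of breaking the whole import. A toy version of the idea
# (not the actual DummyObject metaclass):
class MissingBackendStub:
    _backends = ("transformers", "torch", "note_seq")

    def __init__(self, *args, **kwargs):
        raise ImportError(f"{type(self).__name__} requires: {', '.join(self._backends)}")

try:
    MissingBackendStub()
except ImportError as err:
    print(err)  # MissingBackendStub requires: transformers, torch, note_seq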
| 285 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
lowercase : Tuple = """<<<<<<< This should probably be modified because it mentions: """
lowercase : Any = """=======
>>>>>>>
"""
lowercase : List[str] = [
"""TextEncoderConfig""",
"""ByteTextEncoder""",
"""SubwordTextEncoder""",
"""encoder_config""",
"""maybe_build_from_corpus""",
"""manual_dir""",
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(R"""tfds\.core""", R"""datasets"""),
(R"""tf\.io\.gfile\.GFile""", R"""open"""),
(R"""tf\.([\w\d]+)""", R"""datasets.Value('\1')"""),
(R"""tfds\.features\.Text\(\)""", R"""datasets.Value('string')"""),
(R"""tfds\.features\.Text\(""", R"""datasets.Value('string'),"""),
(R"""features\s*=\s*tfds.features.FeaturesDict\(""", R"""features=datasets.Features("""),
(R"""tfds\.features\.FeaturesDict\(""", R"""dict("""),
(R"""The TensorFlow Datasets Authors""", R"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""),
(R"""tfds\.""", R"""datasets."""),
(R"""dl_manager\.manual_dir""", R"""self.config.data_dir"""),
(R"""self\.builder_config""", R"""self.config"""),
]
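# Note: TO_CONVERT is applied in order with re.sub, so the specific `tfds.features...`
# patterns above must come before the generic `tfds\.` catch-all near the end.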
def convert_command_factory(args: Namespace ):
    return ConvertCommand(args.tfds_path , args.datasets_directory )
class ConvertCommand ( BaseDatasetsCLICommand ):
    @staticmethod
    def register_subcommand( parser ):
        '''simple docstring'''
        train_parser = parser.add_parser(
            """convert""" ,help="""Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.""" ,)
        train_parser.add_argument(
            """--tfds_path""" ,type=str ,required=True ,help="""Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.""" ,)
        train_parser.add_argument(
            """--datasets_directory""" ,type=str ,required=True ,help="""Path to the HuggingFace Datasets folder.""" )
        train_parser.set_defaults(func=convert_command_factory )
    def __init__( self ,tfds_path: str ,datasets_directory: str ,*args ):
        '''simple docstring'''
        self._logger = get_logger("""datasets-cli/converting""" )
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run( self ):
'''simple docstring'''
        if os.path.isdir(self._tfds_path ):
            abs_tfds_path = os.path.abspath(self._tfds_path )
        elif os.path.isfile(self._tfds_path ):
            abs_tfds_path = os.path.dirname(self._tfds_path )
        else:
            raise ValueError("""--tfds_path is neither a directory nor a file. Please check path.""" )
        abs_datasets_path = os.path.abspath(self._datasets_directory )
        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}" )
        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}
        if os.path.isdir(self._tfds_path ):
            file_names = os.listdir(abs_tfds_path )
        else:
            file_names = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(f"Looking at file {f_name}" )
            input_file = os.path.join(abs_tfds_path ,f_name )
            output_file = os.path.join(abs_datasets_path ,f_name )
            if not os.path.isfile(input_file ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("""Skipping file""" )
                continue
            with open(input_file ,encoding="""utf-8""" ) as f:
                lines = f.readlines()
            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
lowercase : Union[str, Any] = """import datasets\n"""
elif "import tensorflow" in out_line:
# order is important here
lowercase : List[Any] = """"""
continue
elif "from absl import logging" in out_line:
lowercase : Optional[int] = """from datasets import logging\n"""
elif "getLogger" in out_line:
lowercase : Any = out_line.replace("""getLogger""" ,"""get_logger""" )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
lowercase : Optional[Any] = True
lowercase : Optional[Any] = list(filter(lambda snake_case : e in out_line ,snake_case ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(snake_case ) + """\n""" )
out_lines.append(snake_case )
out_lines.append(snake_case )
continue
else:
for pattern, replacement in TO_CONVERT:
lowercase : Union[str, Any] = re.sub(snake_case ,snake_case ,snake_case )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
lowercase : Dict = re.match(r"""from\stensorflow_datasets.*import\s([^\.\r\n]+)""" ,snake_case )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(""",""" ) )
lowercase : Optional[int] = """from . import """ + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f"Error converting {out_line.strip()}" )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
lowercase : Any = True
out_lines.append(snake_case )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
                dataset_name = f_name.replace(""".py""" ,"""""" )
                output_dir = os.path.join(abs_datasets_path ,dataset_name )
                output_file = os.path.join(output_dir ,f_name )
                os.makedirs(output_dir ,exist_ok=True )
                self._logger.info(f"Adding directory {output_dir}" )
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file )
            if needs_manual_update:
                with_manual_update.append(output_file )
            with open(output_file ,"""w""" ,encoding="""utf-8""" ) as f:
                f.writelines(out_lines )
            self._logger.info(f"Converted in {output_file}" )
        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file )
                dest_folder = imports_to_builder_map[f_name.replace(""".py""" ,"""""" )]
                self._logger.info(f"Moving {utils_file} to {dest_folder}" )
                shutil.copy(utils_file ,dest_folder )
except KeyError:
self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually." )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'." )
| 285 | 1 |
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = '\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {\'recall\': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {\'recall\': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {\'recall\': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'recall\': array([1., 0., 0.])}\n'
_CITATION = '\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case__ (datasets.Metric ):
"""simple docstring"""
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32" ) ),
"references": datasets.Sequence(datasets.Value("int32" ) ),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"] , )
    def _compute( self , predictions , references , labels=None , pos_label=1 , average="binary" , sample_weight=None , zero_division="warn" , ):
        score = recall_score(
            references , predictions , labels=labels , pos_label=pos_label , average=average , sample_weight=sample_weight , zero_division=zero_division , )
        return {"recall": float(score ) if score.size == 1 else score}
| 107 |
"""simple docstring"""
def factorial(num: int) -> int:
    fact = 1
    for i in range(1 , num + 1 ):
        fact *= i
    return fact
def split_and_add(number: int) -> int:
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits
def solution(num: int = 100) -> int:
    fact = factorial(num )
    result = split_and_add(fact )
    return result
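# Hand-checked example: factorial(10) == 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27,
# so solution(10) should return 27.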
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 217 | 0 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor( ProcessorMixin ):
    """simple docstring"""
    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """LayoutLMv3ImageProcessor"""
    tokenizer_class = ("""LayoutLMv3Tokenizer""", """LayoutLMv3TokenizerFast""")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )
    def __call__( self , images , text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , boxes: Union[List[List[int]], List[List[List[int]]]] = None , word_labels: Optional[Union[List[int], List[List[int]]]] = None , add_special_tokens: bool = True , padding: Union[bool, str, PaddingStrategy] = False , truncation: Union[bool, str, TruncationStrategy] = None , max_length: Optional[int] = None , stride: int = 0 , pad_to_multiple_of: Optional[int] = None , return_token_type_ids: Optional[bool] = None , return_attention_mask: Optional[bool] = None , return_overflowing_tokens: bool = False , return_special_tokens_mask: bool = False , return_offsets_mapping: bool = False , return_length: bool = False , verbose: bool = True , return_tensors: Optional[Union[str, TensorType]] = None , **kwargs , ) -> BatchEncoding:
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
# first, apply the image processor
        features = self.image_processor(images=images , return_tensors=return_tensors )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text , str ):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features['''words''']
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel values
        images = features.pop('''pixel_values''' )
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images , encoded_inputs['''overflow_to_sample_mapping'''] )
        encoded_inputs['''pixel_values'''] = images
return encoded_inputs
    def get_overflowing_images( self , images , overflow_to_sample_mapping ):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
            raise ValueError(
                '''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
                F' {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}' )
        return images_with_overflow
return images_with_overflow
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]
    @property
    def feature_extractor_class( self ):
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , FutureWarning , )
        return self.image_processor
| 326 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__A : List[Any] = logging.get_logger(__name__)
class _a ( BaseImageProcessor ):
    """simple docstring"""
    model_input_names = ["""pixel_values"""]
    def __init__( self , do_resize: bool = True , size: Optional[Dict[str, int]] = None , resample: PILImageResampling = PILImageResampling.BILINEAR , do_center_crop: bool = True , crop_size: Dict[str, int] = None , do_rescale: bool = True , rescale_factor: Union[int, float] = 1 / 255 , do_normalize: bool = True , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , **kwargs , ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {'''shortest_edge''': 256}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
        crop_size = get_size_dict(crop_size )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image: np.ndarray , size: Dict[str, int] , resample: PILImageResampling = PILImageResampling.BICUBIC , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
        output_size = get_resize_output_image_size(image , size=size['''shortest_edge'''] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image: np.ndarray , size: Dict[str, int] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size )
        return center_crop(image , size=(size['''height'''], size['''width''']) , data_format=data_format , **kwargs )
    def rescale( self , image: np.ndarray , scale: float , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs ) -> np.ndarray:
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image: np.ndarray , mean: Union[float, List[float]] , std: Union[float, List[float]] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images: ImageInput , do_resize: Optional[bool] = None , size: Dict[str, int] = None , resample: PILImageResampling = None , do_center_crop: bool = None , crop_size: Dict[str, int] = None , do_rescale: Optional[bool] = None , rescale_factor: Optional[float] = None , do_normalize: Optional[bool] = None , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , return_tensors: Optional[Union[str, TensorType]] = None , data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST , **kwargs , ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size )
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images )
        if not valid_images(images ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
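    # The default chain above (resize shortest edge to 256, center-crop to 224, rescale
    # by 1/255, normalize with ImageNet mean/std) mirrors the standard ImageNet eval recipe.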
| 326 | 1 |
import logging
from transformers.configuration_utils import PretrainedConfig
a =logging.getLogger(__name__)
class MaskedBertConfig( PretrainedConfig ):
    model_type = '''masked_bert'''
    def __init__( self ,vocab_size=30522 ,hidden_size=768 ,num_hidden_layers=12 ,num_attention_heads=12 ,intermediate_size=3072 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=512 ,type_vocab_size=2 ,initializer_range=0.02 ,layer_norm_eps=1E-12 ,pad_token_id=0 ,pruning_method="topK" ,mask_init="constant" ,mask_scale=0.0 ,**kwargs ,):
        super().__init__(pad_token_id=pad_token_id ,**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
| 73 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""albert-base-v1""": 512,
"""albert-large-v1""": 512,
"""albert-xlarge-v1""": 512,
"""albert-xxlarge-v1""": 512,
"""albert-base-v2""": 512,
"""albert-large-v2""": 512,
"""albert-xlarge-v2""": 512,
"""albert-xxlarge-v2""": 512,
}
a ="""▁"""
class AlbertTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self ,vocab_file ,do_lower_case=True ,remove_space=True ,keep_accents=False ,bos_token="[CLS]" ,eos_token="[SEP]" ,unk_token="<unk>" ,sep_token="[SEP]" ,pad_token="<pad>" ,cls_token="[CLS]" ,mask_token="[MASK]" ,sp_model_kwargs: Optional[Dict[str, Any]] = None ,**kwargs ,):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token ,lstrip=True ,rstrip=False ,normalized=False)
            if isinstance(mask_token ,str)
            else mask_token
        )
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case ,remove_space=remove_space ,keep_accents=keep_accents ,bos_token=bos_token ,eos_token=eos_token ,unk_token=unk_token ,sep_token=sep_token ,pad_token=pad_token ,cls_token=cls_token ,mask_token=mask_token ,sp_model_kwargs=self.sp_model_kwargs ,**kwargs ,)
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size( self ):
        return len(self.sp_model)
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__( self ):
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state
    def __setstate__( self ,d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self ,'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text( self ,inputs ):
        if self.remove_space:
            outputs = ' '.join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace('``' ,'"').replace('\'\'' ,'"')
        if not self.keep_accents:
            outputs = unicodedata.normalize('NFKD' ,outputs)
            outputs = ''.join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize( self ,text: str ):
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text ,out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(',') and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE ,''))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
    def _convert_token_to_id( self ,token ):
        return self.sp_model.PieceToId(token)
    def _convert_id_to_token( self ,index ):
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string( self ,tokens ):
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def build_inputs_with_special_tokens( self ,token_ids_0: List[int] ,token_ids_1: Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask( self ,token_ids_0: List[int] ,token_ids_1: Optional[List[int]] = None ,already_has_special_tokens: bool = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 ,token_ids_1=token_ids_1 ,already_has_special_tokens=True )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences( self ,token_ids_0: List[int] ,token_ids_1: Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary( self ,save_directory: str ,filename_prefix: Optional[str] = None ):
        if not os.path.isdir(save_directory):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file ,out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file ,'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 73 | 1 |
"""simple docstring"""
import math
def main() -> None:
    """simple docstring"""
    message = input('Enter message: ' )
    key = int(input(f"Enter key [2-{len(message ) - 1}]: " ) )
    mode = input('Encryption/Decryption [e/d]: ' )
    if mode.lower().startswith('e' ):
        text = encrypt_message(key , message )
    elif mode.lower().startswith('d' ):
        text = decrypt_message(key , message )
    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}" )
def encrypt_message(key: int , message: str ) -> str:
    """simple docstring"""
    cipher_text = [''] * key
    for col in range(key ):
        pointer = col
        while pointer < len(message ):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text )
def decrypt_message(key: int , message: str ) -> str:
    """simple docstring"""
    num_cols = math.ceil(len(message ) / key )
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message )
    plain_text = [''] * num_cols
    col = 0
    row = 0
    for symbol in message:
        plain_text[col] += symbol
        col += 1
        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1
    return "".join(plain_text )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 289 |
"""simple docstring"""
from collections.abc import Iterable
from typing import Generic, TypeVar
lowercase__ : Any = TypeVar("""_T""")
class UpperCamelCase__ ( Generic[_T] ):
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE_ : Iterable[_T] | None = None ):
lowerCAmelCase_ : list[_T] = list(iterable or [] )
lowerCAmelCase_ : list[_T] = []
def __len__( self : List[str] ):
return len(self._stacka ) + len(self._stacka )
def __repr__( self : int ):
return F"Queue({tuple(self._stacka[::-1] + self._stacka )})"
def SCREAMING_SNAKE_CASE__ ( self : Tuple , SCREAMING_SNAKE_CASE_ : _T ):
self._stacka.append(SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : str ):
lowerCAmelCase_ : List[str] = self._stacka.pop
lowerCAmelCase_ : int = self._stacka.append
if not self._stacka:
while self._stacka:
stacka_append(stacka_pop() )
if not self._stacka:
raise IndexError('Queue is empty' )
return self._stacka.pop()
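# Usage sketch: q = QueueByTwoStacks([1, 2]); q.put(3); q.get() returns 1 (FIFO order).
# Each element moves between the two stacks at most once, so operations are amortised O(1).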
if __name__ == "__main__":
from doctest import testmod
testmod()
| 289 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
lowercase__ : Optional[Any] = logging.get_logger(__name__)
lowercase__ : Dict = {
'''openai/whisper-base''': '''https://huggingface.co/openai/whisper-base/resolve/main/config.json''',
}
# fmt: off
lowercase__ : Optional[Any] = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 3_57, 3_66, 4_38, 5_32, 6_85,
7_05, 7_96, 9_30, 10_58, 12_20, 12_67, 12_79, 13_03, 13_43, 13_77,
13_91, 16_35, 17_82, 18_75, 21_62, 23_61, 24_88, 34_67, 40_08, 42_11,
46_00, 48_08, 52_99, 58_55, 63_29, 72_03, 96_09, 99_59, 1_05_63, 1_07_86,
1_14_20, 1_17_09, 1_19_07, 1_31_63, 1_36_97, 1_37_00, 1_48_08, 1_53_06, 1_64_10, 1_67_91,
1_79_92, 1_92_03, 1_95_10, 2_07_24, 2_23_05, 2_29_35, 2_70_07, 3_01_09, 3_04_20, 3_34_09,
3_49_49, 4_02_83, 4_04_93, 4_05_49, 4_72_82, 4_91_46, 5_02_57, 5_03_59, 5_03_60, 5_03_61
]
lowercase__ : List[str] = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 3_59, 5_03, 5_22, 5_42, 8_73,
8_93, 9_02, 9_18, 9_22, 9_31, 13_50, 18_53, 19_82, 24_60, 26_27,
32_46, 32_53, 32_68, 35_36, 38_46, 39_61, 41_83, 46_67, 65_85, 66_47,
72_73, 90_61, 93_83, 1_04_28, 1_09_29, 1_19_38, 1_20_33, 1_23_31, 1_25_62, 1_37_93,
1_41_57, 1_46_35, 1_52_65, 1_56_18, 1_65_53, 1_66_04, 1_83_62, 1_89_56, 2_00_75, 2_16_75,
2_25_20, 2_61_30, 2_61_61, 2_64_35, 2_82_79, 2_94_64, 3_16_50, 3_23_02, 3_24_70, 3_68_65,
4_28_63, 4_74_25, 4_98_70, 5_02_54, 5_02_58, 5_03_60, 5_03_61, 5_03_62
]
class WhisperConfig( PretrainedConfig ):
    model_type = """whisper"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
    def __init__( self , vocab_size=51865 , num_mel_bins=80 , encoder_layers=6 , encoder_attention_heads=4 , decoder_layers=6 , decoder_attention_heads=4 , decoder_ffn_dim=1536 , encoder_ffn_dim=1536 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , decoder_start_token_id=50257 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=256 , dropout=0.0 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , scale_embedding=False , max_source_positions=1500 , max_target_positions=448 , pad_token_id=50256 , bos_token_id=50256 , eos_token_id=50256 , suppress_tokens=None , begin_suppress_tokens=[220, 50256] , use_weighted_layer_sum=False , classifier_proj_size=256 , apply_spec_augment=False , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , median_filter_width=7 , **kwargs , ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , suppress_tokens=suppress_tokens , begin_suppress_tokens=begin_suppress_tokens , **kwargs , )
class WhisperOnnxConfig( OnnxSeq2SeqConfigWithPast ):
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ('''input_features''', {0: '''batch''', 1: '''feature_size''', 2: '''encoder_sequence'''}),
            ] )
        if self.use_past:
            common_inputs['''decoder_input_ids'''] = {0: '''batch'''}
        else:
            common_inputs['''decoder_input_ids'''] = {0: '''batch''', 1: '''decoder_sequence'''}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='''inputs''' )
        return common_inputs
    def generate_dummy_inputs( self , preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional["TensorType"] = None , sampling_rate: int = 22050 , time_duration: float = 5.0 , frequency: int = 220 , ):
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self , preprocessor=preprocessor.feature_extractor , batch_size=batch_size , framework=framework , sampling_rate=sampling_rate , time_duration=time_duration , frequency=frequency , )
        encoder_sequence_length = encoder_inputs['''input_features'''].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer , batch_size , seq_length , is_pair , framework )
        dummy_inputs['''input_features'''] = encoder_inputs.pop('''input_features''' )
        dummy_inputs['''decoder_input_ids'''] = decoder_inputs.pop('''decoder_input_ids''' )
        if "past_key_values" in decoder_inputs:
            dummy_inputs['''past_key_values'''] = decoder_inputs.pop('''past_key_values''' )
        return dummy_inputs
    @property
    def atol_for_validation( self ) -> float:
        return 1E-3
| 264 |
"""simple docstring"""
import sys
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def str_eval(s: str) -> int:
    product = 1
    for digit in s:
        product *= int(digit )
    return product
def solution(n: str = N) -> int:
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n ) - 13:
        if int(n[cur_index] ) >= int(substr[0] ):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product , str_eval(substr ) )
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
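# Hand-checked helper example: str_eval("9989") == 9 * 9 * 8 * 9 == 5832.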
if __name__ == "__main__":
print(f'{solution() = }')
| 264 | 1 |
'''simple docstring'''
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
def polar_force(magnitude: float , angle: float , radian_mode: bool = False ) -> list[float]:
    if radian_mode:
        return [magnitude * cos(angle ), magnitude * sin(angle )]
    return [magnitude * cos(radians(angle ) ), magnitude * sin(radians(angle ) )]
def in_static_equilibrium(forces: NDArray[float64] , location: NDArray[float64] , eps: float = 10**-1 ) -> bool:
    moments: NDArray[float64] = cross(location , forces )
    sum_moments: float = sum(moments )
    return abs(sum_moments ) < eps
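# e.g. polar_force(10, 90) is approximately [0.0, 10.0]: a 10 N force pointing straight up.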
if __name__ == "__main__":
# Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 362 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-openqa': (
'https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt',
},
'tokenizer_file': {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'
),
'google/realm-cc-news-pretrained-openqa': (
'https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json'
),
'google/realm-orqa-nq-openqa': (
'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-nq-reader': (
'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'
),
'google/realm-orqa-wq-openqa': (
'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-wq-reader': (
'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/realm-cc-news-pretrained-embedder': 5_12,
'google/realm-cc-news-pretrained-encoder': 5_12,
'google/realm-cc-news-pretrained-scorer': 5_12,
'google/realm-cc-news-pretrained-openqa': 5_12,
'google/realm-orqa-nq-openqa': 5_12,
'google/realm-orqa-nq-reader': 5_12,
'google/realm-orqa-wq-openqa': 5_12,
'google/realm-orqa-wq-reader': 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
'google/realm-cc-news-pretrained-embedder': {'do_lower_case': True},
'google/realm-cc-news-pretrained-encoder': {'do_lower_case': True},
'google/realm-cc-news-pretrained-scorer': {'do_lower_case': True},
'google/realm-cc-news-pretrained-openqa': {'do_lower_case': True},
'google/realm-orqa-nq-openqa': {'do_lower_case': True},
'google/realm-orqa-nq-reader': {'do_lower_case': True},
'google/realm-orqa-wq-openqa': {'do_lower_case': True},
'google/realm-orqa-wq-reader': {'do_lower_case': True},
}
class RealmTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' , do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents' , strip_accents ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def batch_encode_candidates( self , text , **kwargs ):
        '''simple docstring'''
        kwargs['padding'] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop('text_pair' , None )
        return_tensors = kwargs.pop('return_tensors' , None )
        output_data = {
            'input_ids': [],
            'attention_mask': [],
            'token_type_ids': [],
        }
        for idx, candidate_text in enumerate(batch_text ):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None
            encoded_candidates = super().__call__(candidate_text , candidate_text_pair , return_tensors=None , **kwargs )
            encoded_input_ids = encoded_candidates.get('input_ids' )
            encoded_attention_mask = encoded_candidates.get('attention_mask' )
            encoded_token_type_ids = encoded_candidates.get('token_type_ids' )
            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids )
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask )
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids )
        output_data = {key: item for key, item in output_data.items() if len(item ) != 0}
        return BatchEncoding(output_data , tensor_type=return_tensors )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 251 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__)
def create_rename_keys(config ,base_model=False ):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
lowerCamelCase : Union[str, Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=False ) -> str:
for i in range(config.num_hidden_layers ):
if base_model:
lowerCamelCase : Optional[int] = ""
else:
lowerCamelCase : List[str] = "deit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase : List[str] = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
lowerCamelCase : Optional[int] = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase : List[Any] = in_proj_weight[
: config.hidden_size, :
]
lowerCamelCase : Any = in_proj_bias[: config.hidden_size]
lowerCamelCase : List[str] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase : Optional[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase : List[str] = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase : List[Any] = in_proj_bias[-config.hidden_size :]
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> str:
lowerCamelCase : List[str] = dct.pop(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Any = val
def A ( ) -> List[str]:
lowerCamelCase : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCamelCase : str = Image.open(requests.get(_SCREAMING_SNAKE_CASE ,stream=_SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
lowerCamelCase : Union[str, Any] = DeiTConfig()
# all deit models have fine-tuned heads
lowerCamelCase : Optional[int] = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
lowerCamelCase : Dict = 1000
lowerCamelCase : Tuple = "huggingface/label-files"
lowerCamelCase : List[str] = "imagenet-1k-id2label.json"
lowerCamelCase : List[Any] = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,repo_type="dataset" ) ,"r" ) )
lowerCamelCase : Optional[int] = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
lowerCamelCase : Tuple = idalabel
lowerCamelCase : str = {v: k for k, v in idalabel.items()}
lowerCamelCase : Dict = int(deit_name[-6:-4] )
lowerCamelCase : Optional[Any] = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith("tiny" ):
lowerCamelCase : Optional[Any] = 192
lowerCamelCase : List[str] = 768
lowerCamelCase : Tuple = 12
lowerCamelCase : Optional[Any] = 3
elif deit_name[9:].startswith("small" ):
lowerCamelCase : str = 384
lowerCamelCase : Optional[Any] = 1536
lowerCamelCase : Dict = 12
lowerCamelCase : Optional[int] = 6
if deit_name[9:].startswith("base" ):
pass
elif deit_name[4:].startswith("large" ):
lowerCamelCase : str = 1024
lowerCamelCase : List[str] = 4096
lowerCamelCase : Any = 24
lowerCamelCase : Dict = 16
# load original model from timm
lowerCamelCase : List[Any] = timm.create_model(_SCREAMING_SNAKE_CASE ,pretrained=_SCREAMING_SNAKE_CASE )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
lowerCamelCase : Dict = timm_model.state_dict()
lowerCamelCase : Dict = create_rename_keys(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
read_in_q_k_v(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# load HuggingFace model
lowerCamelCase : Optional[Any] = DeiTForImageClassificationWithTeacher(_SCREAMING_SNAKE_CASE ).eval()
model.load_state_dict(_SCREAMING_SNAKE_CASE )
# Check outputs on an image, prepared by DeiTImageProcessor
lowerCamelCase : Any = int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
lowerCamelCase : Union[str, Any] = DeiTImageProcessor(size=_SCREAMING_SNAKE_CASE ,crop_size=config.image_size )
lowerCamelCase : str = image_processor(images=prepare_img() ,return_tensors="pt" )
lowerCamelCase : int = encoding["pixel_values"]
lowerCamelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Union[str, Any] = timm_model(_SCREAMING_SNAKE_CASE )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_SCREAMING_SNAKE_CASE ,outputs.logits ,atol=1e-3 )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
print(f'''Saving model {deit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--deit_name',
default='vit_deit_base_distilled_patch16_224',
type=str,
help='Name of the DeiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
SCREAMING_SNAKE_CASE__ : List[str] = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 48 |
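A sanity check for the q/k/v slicing done by the second helper above: timm stores one fused (3 * hidden, hidden) projection, and the three slices must tile it exactly. A minimal sketch with a toy hidden size (synthetic tensors, not real checkpoint weights):

import torch

hidden_size = 8  # toy dimension for illustration only
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)  # fused [q; k; v]
in_proj_bias = torch.randn(3 * hidden_size)

query_w = in_proj_weight[:hidden_size, :]
key_w = in_proj_weight[hidden_size : hidden_size * 2, :]
value_w = in_proj_weight[-hidden_size:, :]

# the three slices reassemble into the fused matrix and bias
assert torch.equal(torch.cat([query_w, key_w, value_w]), in_proj_weight)
assert torch.equal(
    torch.cat(
        [
            in_proj_bias[:hidden_size],
            in_proj_bias[hidden_size : hidden_size * 2],
            in_proj_bias[-hidden_size:],
        ]
    ),
    in_proj_bias,
)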
'''simple docstring'''
def a_ ( _lowerCAmelCase ) -> str:
if not all(char in '01' for char in bin_string ):
raise ValueError('Non-binary value was passed to the function' )
if not bin_string:
raise ValueError('Empty string was passed to the function' )
__lowerCamelCase : int = ''
while len(_lowerCAmelCase ) % 3 != 0:
__lowerCamelCase : str = '0' + bin_string
__lowerCamelCase : Union[str, Any] = [
bin_string[index : index + 3]
for index in range(len(_lowerCAmelCase ) )
if index % 3 == 0
]
for bin_group in bin_string_in_3_list:
__lowerCamelCase : Tuple = 0
for index, val in enumerate(_lowerCAmelCase ):
oct_val += int(2 ** (2 - index) * int(_lowerCAmelCase ) )
oct_string += str(_lowerCAmelCase )
return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
| 208 | 0 |
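The converter above zero-pads the input to a multiple of three bits and maps every 3-bit group to one octal digit. A quick cross-check against Python's built-ins (the helper name below is mine; the snippet's own function is the obfuscated a_):

def bin_to_octal_builtin(bin_string: str) -> str:
    return oct(int(bin_string, 2))[2:]  # strip the "0o" prefix

assert bin_to_octal_builtin("1111") == "17"
assert bin_to_octal_builtin("101010101") == "525"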
'''simple docstring'''
def lowercase__( __UpperCamelCase: dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = set()
# edges = set of the graph's edges
SCREAMING_SNAKE_CASE : Dict = get_edges(UpperCamelCase__ )
# While there are still edges left, take an arbitrary edge
# (from_node, to_node), add both of its endpoints to chosen_vertices, and
# then remove every edge adjacent to from_node or to_node
while edges:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = edges.pop()
chosen_vertices.add(UpperCamelCase__ )
chosen_vertices.add(UpperCamelCase__ )
for edge in edges.copy():
if from_node in edge or to_node in edge:
edges.discard(UpperCamelCase__ )
return chosen_vertices
def lowercase__( __UpperCamelCase: dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = set()
for from_node, to_nodes in graph.items():
for to_node in to_nodes:
edges.add((from_node, to_node) )
return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 367 |
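The heuristic above computes a maximal matching and returns both endpoints of every matched edge, the classic 2-approximation for minimum vertex cover. A compact, runnable restatement with descriptive names (both functions in the snippet share the obfuscated name lowercase__, so they cannot be called as written), validated on the graph from the snippet's trailing comment:

def matching_min_vertex_cover(graph: dict) -> set:
    edges = {(u, v) for u, neighbors in graph.items() for v in neighbors}
    cover = set()
    while edges:
        from_node, to_node = edges.pop()
        cover.update((from_node, to_node))
        edges = {e for e in edges if from_node not in e and to_node not in e}
    return cover

graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
cover = matching_min_vertex_cover(graph)
# every edge must have at least one endpoint in the cover
assert all(u in cover or v in cover for u, vs in graph.items() for v in vs)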
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json",
"uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json",
"uclanlp/visualbert-vqa-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"
),
"uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json",
"uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json",
"uclanlp/visualbert-vcr-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"
),
"uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json",
"uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json",
"uclanlp/visualbert-nlvr2-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : Optional[int] = '''visual_bert'''
def __init__( self, A=30_522, A=768, A=512, A=12, A=12, A=3_072, A="gelu", A=0.1, A=0.1, A=512, A=2, A=0.02, A=1E-12, A=False, A=True, A=1, A=0, A=2, **A, ):
'''simple docstring'''
super().__init__(pad_token_id=A, bos_token_id=A, eos_token_id=A, **A )
SCREAMING_SNAKE_CASE : Dict = vocab_size
SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : str = hidden_size
SCREAMING_SNAKE_CASE : int = visual_embedding_dim
SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE : List[str] = intermediate_size
SCREAMING_SNAKE_CASE : Tuple = hidden_act
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[int] = initializer_range
SCREAMING_SNAKE_CASE : str = type_vocab_size
SCREAMING_SNAKE_CASE : List[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE : str = bypass_transformer
SCREAMING_SNAKE_CASE : Any = special_visual_initialize
| 246 | 0 |
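For reference, the class above corresponds to transformers' VisualBertConfig (defaults visible in the signature: vocab_size 30522, hidden_size 768, visual_embedding_dim 512). A minimal instantiation sketch:

from transformers import VisualBertConfig

config = VisualBertConfig(visual_embedding_dim=1024, num_hidden_layers=6)
print(config.hidden_size, config.visual_embedding_dim)  # 768 1024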
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__ = {
"configuration_x_clip": [
"XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XCLIPConfig",
"XCLIPTextConfig",
"XCLIPVisionConfig",
],
"processing_x_clip": ["XCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"XCLIPModel",
"XCLIPPreTrainedModel",
"XCLIPTextModel",
"XCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 46 |
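The module above is a standard lazy-import shim: importing the package stays cheap, and the real submodule is loaded only when one of its attributes is first touched. A stripped-down sketch of the idea behind _LazyModule (stdlib modules stand in for the real submodules; error handling omitted):

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every exported attribute to the module that provides it
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # only reached when `attr` is not already set: import on demand
        module = importlib.import_module(self._attr_to_module[attr])
        return getattr(module, attr)

lazy = LazyModule("lazy_std", {"math": ["sqrt"], "json": ["dumps"]})
print(lazy.sqrt(9.0))  # math is imported here, on first access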
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class __magic_name__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = tempfile.mkdtemp()
# fmt: off
lowerCamelCase = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest"""]
# fmt: on
lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
lowerCamelCase = {
"""do_resize""": True,
"""size""": {"""height""": 18, """width""": 18},
"""do_normalize""": True,
"""image_mean""": [0.5, 0.5, 0.5],
"""image_std""": [0.5, 0.5, 0.5],
}
lowerCamelCase = os.path.join(self.tmpdirname , _a )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(_a , _a )
def _lowerCAmelCase ( self , **_a ):
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **_a )
def _lowerCAmelCase ( self , **_a ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCamelCase = [Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = self.get_tokenizer()
lowerCamelCase = self.get_image_processor()
lowerCamelCase = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowerCamelCase = self.get_image_processor(do_normalize=_a , padding_value=1.0 )
lowerCamelCase = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = self.get_image_processor()
lowerCamelCase = self.get_tokenizer()
lowerCamelCase = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
lowerCamelCase = self.prepare_image_inputs()
lowerCamelCase = image_processor(_a , return_tensors="""np""" )
lowerCamelCase = processor(images=_a , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = self.get_image_processor()
lowerCamelCase = self.get_tokenizer()
lowerCamelCase = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
lowerCamelCase = """lower newer"""
lowerCamelCase = processor(text=_a )
lowerCamelCase = tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = self.get_image_processor()
lowerCamelCase = self.get_tokenizer()
lowerCamelCase = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
lowerCamelCase = """lower newer"""
lowerCamelCase = self.prepare_image_inputs()
lowerCamelCase = processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with self.assertRaises(_a ):
processor()
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = self.get_image_processor()
lowerCamelCase = self.get_tokenizer()
lowerCamelCase = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
lowerCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase = processor.batch_decode(_a )
lowerCamelCase = tokenizer.batch_decode(_a )
self.assertListEqual(_a , _a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = self.get_image_processor()
lowerCamelCase = self.get_tokenizer()
lowerCamelCase = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
lowerCamelCase = """lower newer"""
lowerCamelCase = self.prepare_image_inputs()
lowerCamelCase = processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 291 | 0 |
from sklearn.metrics import mean_squared_error
import datasets
lowerCAmelCase = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
lowerCAmelCase = '\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n'
lowerCAmelCase = '\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n "raw_values" : Returns a full set of errors in case of multioutput input.\n\n "uniform_average" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric("mse")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {\'mse\': 0.6123724356957945}\n\n If you\'re using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric("mse", "multilist")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mse\': array([0.41666667, 1. ])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
def _A (self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'
] , )
def _A (self ):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('float' ) ),
"references": datasets.Sequence(datasets.Value('float' ) ),
}
else:
return {
"predictions": datasets.Value('float' ),
"references": datasets.Value('float' ),
}
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=None , lowerCAmelCase="uniform_average" , lowerCAmelCase=True ):
__lowercase= mean_squared_error(
lowerCAmelCase , lowerCAmelCase , sample_weight=lowerCAmelCase , multioutput=lowerCAmelCase , squared=lowerCAmelCase )
return {"mse": mse}
| 359 |
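The docstring values above are easy to verify by hand, since MSE is just the mean of squared residuals and RMSE its square root:

predictions = [2.5, 0.0, 2, 8]
references = [3, -0.5, 2, 7]
mse = sum((p - r) ** 2 for p, r in zip(predictions, references)) / len(references)
print(mse)         # 0.375, matching the docstring example
print(mse ** 0.5)  # ~0.6124, the squared=False (RMSE) case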
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def _lowerCamelCase( lowercase__ ) -> List[str]:
'''simple docstring'''
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def _lowerCamelCase( lowercase__ ) -> int:
'''simple docstring'''
__lowercase= create_tensor(lowercase__ )
__lowercase= gather(lowercase__ )
assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def _lowerCamelCase( lowercase__ ) -> int:
'''simple docstring'''
__lowercase= [state.process_index]
__lowercase= gather_object(lowercase__ )
assert len(lowercase__ ) == state.num_processes, F'{gathered_obj}, {len(lowercase__ )} != {state.num_processes}'
assert gathered_obj == list(range(state.num_processes ) ), F'{gathered_obj} != {list(range(state.num_processes ) )}'
def _lowerCamelCase( lowercase__ ) -> List[str]:
'''simple docstring'''
__lowercase= create_tensor(lowercase__ )
__lowercase= broadcast(lowercase__ )
assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def _lowerCamelCase( lowercase__ ) -> List[Any]:
'''simple docstring'''
if state.is_main_process:
__lowercase= torch.arange(state.num_processes + 1 ).to(state.device )
else:
__lowercase= torch.arange(state.num_processes ).to(state.device )
__lowercase= pad_across_processes(lowercase__ )
assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
if not state.is_main_process:
assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def _lowerCamelCase( lowercase__ ) -> Any:
'''simple docstring'''
if state.num_processes != 2:
return
__lowercase= create_tensor(lowercase__ )
__lowercase= reduce(lowercase__ , 'sum' )
__lowercase= torch.tensor([4.0, 6] ).to(state.device )
assert torch.allclose(lowercase__ , lowercase__ ), F'{reduced_tensor} != {truth_tensor}'
def _lowerCamelCase( lowercase__ ) -> Union[str, Any]:
'''simple docstring'''
if state.num_processes != 2:
return
__lowercase= create_tensor(lowercase__ )
__lowercase= reduce(lowercase__ , 'mean' )
__lowercase= torch.tensor([2.0, 3] ).to(state.device )
assert torch.allclose(lowercase__ , lowercase__ ), F'{reduced_tensor} != {truth_tensor}'
def _lowerCamelCase( lowercase__ ) -> List[str]:
'''simple docstring'''
main()
def _lowerCamelCase( ) -> List[str]:
'''simple docstring'''
__lowercase= PartialState()
state.print(F'State: {state}' )
state.print('testing gather' )
test_gather(lowercase__ )
state.print('testing gather_object' )
test_gather_object(lowercase__ )
state.print('testing broadcast' )
test_broadcast(lowercase__ )
state.print('testing pad_across_processes' )
test_pad_across_processes(lowercase__ )
state.print('testing reduce_sum' )
test_reduce_sum(lowercase__ )
state.print('testing reduce_mean' )
test_reduce_mean(lowercase__ )
if __name__ == "__main__":
main()
| 304 | 0 |
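The script above only exercises real collectives when started with several processes, e.g. via `accelerate launch --num_processes 2 <script>.py`. A single-process smoke test (assuming, as accelerate documents, that gather reduces to an identity when the world size is 1):

import torch
from accelerate import PartialState
from accelerate.utils.operations import gather

state = PartialState()
tensor = torch.arange(1, 3, dtype=torch.float32).to(state.device)
assert gather(tensor).tolist() == [1.0, 2.0]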
"""simple docstring"""
import sys
a : Dict = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def _SCREAMING_SNAKE_CASE ( _lowercase : str ) ->int:
'''simple docstring'''
a : List[str] = 1
for digit in s:
product *= int(_lowercase )
return product
def _SCREAMING_SNAKE_CASE ( _lowercase : str = N ) ->int:
'''simple docstring'''
a : int = -sys.maxsize - 1
a : Union[str, Any] = n[:13]
a : Any = 13
while cur_index < len(_lowercase ) - 13:
if int(n[cur_index] ) >= int(substr[0] ):
a : Union[str, Any] = substr[1:] + n[cur_index]
cur_index += 1
else:
a : Tuple = max(_lowercase , str_eval(_lowercase ) )
a : int = n[cur_index : cur_index + 13]
cur_index += 13
return largest_product
if __name__ == "__main__":
print(F'''{solution() = }''')
| 105 |
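The windowed search above skips ahead whenever the window's leading digit cannot be improved on; an exhaustive cross-check is short enough to keep nearby (the function name below is mine):

def brute_force_largest_product(digits: str, span: int = 13) -> int:
    best = 0
    for i in range(len(digits) - span + 1):
        product = 1
        for d in digits[i : i + span]:
            product *= int(d)
        best = max(best, product)
    return best

assert brute_force_largest_product("123456", span=3) == 120  # 4 * 5 * 6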
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def _SCREAMING_SNAKE_CASE ( ) ->Dict:
'''simple docstring'''
a : Any = ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=_lowercase )
a : List[Any] = parser.add_subparsers(help="accelerate command helpers" )
# Register commands
get_config_parser(subparsers=_lowercase )
env_command_parser(subparsers=_lowercase )
launch_command_parser(subparsers=_lowercase )
tpu_command_parser(subparsers=_lowercase )
test_command_parser(subparsers=_lowercase )
# Let's go
a : List[Any] = parser.parse_args()
if not hasattr(_lowercase , "func" ):
parser.print_help()
exit(1 )
# Run
args.func(_lowercase )
if __name__ == "__main__":
main()
| 105 | 1 |
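The dispatch pattern above (each *_command_parser registers a subparser and stores a callback that the final args.func(args) invokes) reduces to a few lines. A self-contained sketch:

from argparse import ArgumentParser

def hello_command_parser(subparsers):
    parser = subparsers.add_parser("hello")
    parser.add_argument("--name", default="world")
    parser.set_defaults(func=lambda args: print(f"hello {args.name}"))

parser = ArgumentParser("demo CLI", usage="demo <command> [<args>]")
subparsers = parser.add_subparsers(help="demo command helpers")
hello_command_parser(subparsers)
args = parser.parse_args(["hello", "--name", "accelerate"])
args.func(args)  # prints: hello accelerate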
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def _UpperCamelCase ( snake_case__, snake_case__=0.999, snake_case__="cosine", ) -> List[Any]:
if alpha_transform_type == "cosine":
def alpha_bar_fn(snake_case__ ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(snake_case__ ):
return math.exp(t * -12.0 )
else:
raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
__UpperCAmelCase : Union[str, Any] = []
for i in range(snake_case__ ):
__UpperCAmelCase : Union[str, Any] = i / num_diffusion_timesteps
__UpperCAmelCase : Optional[Any] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(snake_case__ ) / alpha_bar_fn(snake_case__ ), snake_case__ ) )
return torch.tensor(snake_case__, dtype=torch.floataa )
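# Added note (not in the original file): with the default "cosine" transform this
# evaluates alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2 on the grid
# t = i / num_diffusion_timesteps and returns
# beta_i = min(1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), max_beta),
# i.e. the "squaredcos_cap_v2" schedule selected further down in __init__.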
class _snake_case ( _lowercase , _lowercase ):
lowerCamelCase__: Dict = [e.name for e in KarrasDiffusionSchedulers]
lowerCamelCase__: Optional[int] = 2
@register_to_config
def __init__( self: Optional[Any] , __lowerCamelCase: int = 10_00 , __lowerCamelCase: float = 0.0_00_85 , __lowerCamelCase: float = 0.0_12 , __lowerCamelCase: str = "linear" , __lowerCamelCase: Optional[Union[np.ndarray, List[float]]] = None , __lowerCamelCase: str = "epsilon" , __lowerCamelCase: str = "linspace" , __lowerCamelCase: int = 0 , ) -> int:
if trained_betas is not None:
__UpperCAmelCase : Any = torch.tensor(__lowerCamelCase , dtype=torch.floataa )
elif beta_schedule == "linear":
__UpperCAmelCase : Optional[Any] = torch.linspace(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__UpperCAmelCase : Optional[Any] = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , __lowerCamelCase , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__UpperCAmelCase : Optional[int] = betas_for_alpha_bar(__lowerCamelCase )
else:
raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''' )
__UpperCAmelCase : Union[str, Any] = 1.0 - self.betas
__UpperCAmelCase : Optional[int] = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def _lowerCamelCase ( self: Optional[int] , __lowerCamelCase: Any , __lowerCamelCase: Optional[Any]=None ) -> int:
if schedule_timesteps is None:
__UpperCAmelCase : List[str] = self.timesteps
__UpperCAmelCase : List[Any] = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
__UpperCAmelCase : Union[str, Any] = 1 if len(__lowerCamelCase ) > 1 else 0
else:
__UpperCAmelCase : Optional[Any] = timestep.cpu().item() if torch.is_tensor(__lowerCamelCase ) else timestep
__UpperCAmelCase : Dict = self._index_counter[timestep_int]
return indices[pos].item()
@property
def _lowerCamelCase ( self: Optional[int] ) -> Optional[Any]:
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def _lowerCamelCase ( self: Dict , __lowerCamelCase: torch.FloatTensor , __lowerCamelCase: Union[float, torch.FloatTensor] , ) -> torch.FloatTensor:
__UpperCAmelCase : Optional[int] = self.index_for_timestep(__lowerCamelCase )
if self.state_in_first_order:
__UpperCAmelCase : List[Any] = self.sigmas[step_index]
else:
__UpperCAmelCase : Optional[int] = self.sigmas_interpol[step_index]
__UpperCAmelCase : Union[str, Any] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: int , __lowerCamelCase: Union[str, torch.device] = None , __lowerCamelCase: Optional[int] = None , ) -> Optional[int]:
__UpperCAmelCase : List[str] = num_inference_steps
__UpperCAmelCase : Dict = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__UpperCAmelCase : Any = np.linspace(0 , num_train_timesteps - 1 , __lowerCamelCase , dtype=__lowerCamelCase )[::-1].copy()
elif self.config.timestep_spacing == "leading":
__UpperCAmelCase : List[Any] = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__UpperCAmelCase : List[str] = (np.arange(0 , __lowerCamelCase ) * step_ratio).round()[::-1].copy().astype(__lowerCamelCase )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__UpperCAmelCase : Union[str, Any] = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__UpperCAmelCase : Dict = (np.arange(__lowerCamelCase , 0 , -step_ratio )).round().copy().astype(__lowerCamelCase )
timesteps -= 1
else:
raise ValueError(
f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
__UpperCAmelCase : Union[str, Any] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
__UpperCAmelCase : Tuple = torch.from_numpy(np.log(__lowerCamelCase ) ).to(__lowerCamelCase )
__UpperCAmelCase : Any = np.interp(__lowerCamelCase , np.arange(0 , len(__lowerCamelCase ) ) , __lowerCamelCase )
__UpperCAmelCase : str = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
__UpperCAmelCase : Optional[Any] = torch.from_numpy(__lowerCamelCase ).to(device=__lowerCamelCase )
# interpolate sigmas
__UpperCAmelCase : Any = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
__UpperCAmelCase : Union[str, Any] = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
__UpperCAmelCase : Optional[Any] = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(__lowerCamelCase ).startswith("mps" ):
# mps does not support float64
__UpperCAmelCase : Dict = torch.from_numpy(__lowerCamelCase ).to(__lowerCamelCase , dtype=torch.floataa )
else:
__UpperCAmelCase : Union[str, Any] = torch.from_numpy(__lowerCamelCase ).to(__lowerCamelCase )
# interpolate timesteps
__UpperCAmelCase : List[str] = self.sigma_to_t(__lowerCamelCase ).to(__lowerCamelCase , dtype=timesteps.dtype )
__UpperCAmelCase : Tuple = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
__UpperCAmelCase : int = torch.cat([timesteps[:1], interleaved_timesteps] )
__UpperCAmelCase : List[str] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__UpperCAmelCase : List[Any] = defaultdict(__lowerCamelCase )
def _lowerCamelCase ( self: Optional[int] , __lowerCamelCase: List[Any] ) -> List[str]:
# get log sigma
__UpperCAmelCase : List[str] = sigma.log()
# get distribution
__UpperCAmelCase : Union[str, Any] = log_sigma - self.log_sigmas[:, None]
# get sigmas range
__UpperCAmelCase : Union[str, Any] = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
__UpperCAmelCase : int = low_idx + 1
__UpperCAmelCase : str = self.log_sigmas[low_idx]
__UpperCAmelCase : List[Any] = self.log_sigmas[high_idx]
# interpolate sigmas
__UpperCAmelCase : str = (low - log_sigma) / (low - high)
__UpperCAmelCase : Optional[Any] = w.clamp(0 , 1 )
# transform interpolation to time range
__UpperCAmelCase : Any = (1 - w) * low_idx + w * high_idx
__UpperCAmelCase : str = t.view(sigma.shape )
return t
@property
def _lowerCamelCase ( self: str ) -> int:
return self.sample is None
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: Union[torch.FloatTensor, np.ndarray] , __lowerCamelCase: Union[float, torch.FloatTensor] , __lowerCamelCase: Union[torch.FloatTensor, np.ndarray] , __lowerCamelCase: bool = True , ) -> Union[SchedulerOutput, Tuple]:
__UpperCAmelCase : int = self.index_for_timestep(__lowerCamelCase )
# advance index counter by 1
__UpperCAmelCase : Optional[Any] = timestep.cpu().item() if torch.is_tensor(__lowerCamelCase ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__UpperCAmelCase : List[Any] = self.sigmas[step_index]
__UpperCAmelCase : Optional[Any] = self.sigmas_interpol[step_index + 1]
__UpperCAmelCase : Optional[int] = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
__UpperCAmelCase : Tuple = self.sigmas[step_index - 1]
__UpperCAmelCase : str = self.sigmas_interpol[step_index]
__UpperCAmelCase : Union[str, Any] = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
__UpperCAmelCase : str = 0
__UpperCAmelCase : List[Any] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__UpperCAmelCase : int = sigma_hat if self.state_in_first_order else sigma_interpol
__UpperCAmelCase : List[str] = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__UpperCAmelCase : List[str] = sigma_hat if self.state_in_first_order else sigma_interpol
__UpperCAmelCase : Any = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("prediction_type not implemented yet: sample" )
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__UpperCAmelCase : Optional[int] = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__UpperCAmelCase : List[Any] = sigma_interpol - sigma_hat
# store for 2nd order step
__UpperCAmelCase : List[str] = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
__UpperCAmelCase : List[Any] = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
__UpperCAmelCase : Optional[Any] = sigma_next - sigma_hat
__UpperCAmelCase : Tuple = self.sample
__UpperCAmelCase : Optional[Any] = None
__UpperCAmelCase : List[str] = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__lowerCamelCase )
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: torch.FloatTensor , __lowerCamelCase: torch.FloatTensor , __lowerCamelCase: torch.FloatTensor , ) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__UpperCAmelCase : int = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(__lowerCamelCase ):
# mps does not support float64
__UpperCAmelCase : List[Any] = self.timesteps.to(original_samples.device , dtype=torch.floataa )
__UpperCAmelCase : Union[str, Any] = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
__UpperCAmelCase : Optional[Any] = self.timesteps.to(original_samples.device )
__UpperCAmelCase : Optional[Any] = timesteps.to(original_samples.device )
__UpperCAmelCase : List[Any] = [self.index_for_timestep(__lowerCamelCase , __lowerCamelCase ) for t in timesteps]
__UpperCAmelCase : Tuple = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
__UpperCAmelCase : Optional[Any] = sigma.unsqueeze(-1 )
__UpperCAmelCase : str = original_samples + noise * sigma
return noisy_samples
def __len__( self: str ) -> Tuple:
return self.config.num_train_timesteps
| 365 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {
'''configuration_trajectory_transformer''': [
'''TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TrajectoryTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrajectoryTransformerModel''',
'''TrajectoryTransformerPreTrainedModel''',
'''load_tf_weights_in_trajectory_transformer''',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 342 | 0 |
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = 256
# Modulus to hash a string
UpperCAmelCase_ : List[Any] = 100_0003
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : int = len(SCREAMING_SNAKE_CASE__ )
_SCREAMING_SNAKE_CASE : Union[str, Any] = len(SCREAMING_SNAKE_CASE__ )
if p_len > t_len:
return False
_SCREAMING_SNAKE_CASE : Tuple = 0
_SCREAMING_SNAKE_CASE : Union[str, Any] = 0
_SCREAMING_SNAKE_CASE : Union[str, Any] = 1
# Calculating the hash of pattern and substring of text
for i in range(SCREAMING_SNAKE_CASE__ ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
_SCREAMING_SNAKE_CASE : str = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
_SCREAMING_SNAKE_CASE : List[str] = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
_SCREAMING_SNAKE_CASE : str = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def snake_case_ ( ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = """abc1abc12"""
_SCREAMING_SNAKE_CASE : Any = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = """alskfjaldsk23adsfabcabc"""
assert rabin_karp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and not rabin_karp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Test 2)
_SCREAMING_SNAKE_CASE : Dict = """ABABX"""
_SCREAMING_SNAKE_CASE : Optional[int] = """ABABZABABYABABX"""
assert rabin_karp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Test 3)
_SCREAMING_SNAKE_CASE : Optional[int] = """AAAB"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = """ABAAAAAB"""
assert rabin_karp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Test 4)
_SCREAMING_SNAKE_CASE : Optional[int] = """abcdabcy"""
_SCREAMING_SNAKE_CASE : Any = """abcxabcdabxabcdabcdabcy"""
assert rabin_karp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Test 5)
_SCREAMING_SNAKE_CASE : List[str] = """Lü"""
_SCREAMING_SNAKE_CASE : Optional[int] = """Lüsai"""
assert rabin_karp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
_SCREAMING_SNAKE_CASE : Any = """Lue"""
assert not rabin_karp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
print("""Success.""" )
if __name__ == "__main__":
test_rabin_karp()
| 200 |
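The update marked with the rolling-hash Wikipedia link in the snippet above is the standard identity: drop the leading character's contribution, shift, and add the incoming character. A self-contained check that sliding the window reproduces the from-scratch hash (same alphabet size and modulus as the snippet):

alphabet_size, modulus = 256, 1_000_003

def window_hash(s: str) -> int:
    h = 0
    for ch in s:
        h = (h * alphabet_size + ord(ch)) % modulus
    return h

text, m = "abcdef", 3
power = pow(alphabet_size, m - 1, modulus)
h = window_hash(text[:m])
for i in range(len(text) - m):
    h = ((h - ord(text[i]) * power) * alphabet_size + ord(text[i + m])) % modulus
    assert h == window_hash(text[i + 1 : i + 1 + m])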
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {
'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class lowercase__ ( _snake_case ):
'''simple docstring'''
A_ : Optional[int] = """canine"""
def __init__( self , __snake_case=768 , __snake_case=12 , __snake_case=12 , __snake_case=3072 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=1_6384 , __snake_case=16 , __snake_case=0.02 , __snake_case=1e-12 , __snake_case=0 , __snake_case=0XE000 , __snake_case=0XE001 , __snake_case=4 , __snake_case=4 , __snake_case=8 , __snake_case=1_6384 , __snake_case=128 , **__snake_case , ):
super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
_SCREAMING_SNAKE_CASE : str = max_position_embeddings
_SCREAMING_SNAKE_CASE : Any = hidden_size
_SCREAMING_SNAKE_CASE : Dict = num_hidden_layers
_SCREAMING_SNAKE_CASE : Tuple = num_attention_heads
_SCREAMING_SNAKE_CASE : List[str] = intermediate_size
_SCREAMING_SNAKE_CASE : Tuple = hidden_act
_SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
_SCREAMING_SNAKE_CASE : str = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE : Dict = initializer_range
_SCREAMING_SNAKE_CASE : Optional[int] = type_vocab_size
_SCREAMING_SNAKE_CASE : Optional[Any] = layer_norm_eps
# Character config:
_SCREAMING_SNAKE_CASE : Any = downsampling_rate
_SCREAMING_SNAKE_CASE : List[str] = upsampling_kernel_size
_SCREAMING_SNAKE_CASE : Union[str, Any] = num_hash_functions
_SCREAMING_SNAKE_CASE : Optional[Any] = num_hash_buckets
_SCREAMING_SNAKE_CASE : Union[str, Any] = local_transformer_stride
| 200 | 1 |
"""simple docstring"""
import itertools
import math
def __a ( _SCREAMING_SNAKE_CASE ) ->bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(_SCREAMING_SNAKE_CASE ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __a ( ) ->str:
a__: Optional[Any] = 2
while True:
if is_prime(_SCREAMING_SNAKE_CASE ):
yield num
num += 1
def __a ( _SCREAMING_SNAKE_CASE = 10001 ) ->int:
return next(itertools.islice(prime_generator() , nth - 1 , _SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
print(f"{solution() = }")
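# Added note: the 6k +/- 1 loop above is sound because every prime p > 3
# satisfies p % 6 in {1, 5}; trial-dividing by 5, 7, 11, 13, ... up to
# sqrt(number) therefore covers every possible prime factor.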
| 203 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
lowercase__ = None
lowercase__ = logging.get_logger(__name__)
lowercase__ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
lowercase__ = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
},
}
lowercase__ = {
'facebook/mbart-large-en-ro': 1024,
'facebook/mbart-large-cc25': 1024,
}
# fmt: off
lowercase__ = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class __snake_case ( __lowerCAmelCase ):
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = ["""input_ids""", """attention_mask"""]
a__ = MBartTokenizer
a__ = []
a__ = []
def __init__( self , lowercase=None , lowercase=None , lowercase="<s>" , lowercase="</s>" , lowercase="</s>" , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase="<mask>" , lowercase=None , lowercase=None , lowercase=None , **lowercase , ) -> Tuple:
'''simple docstring'''
a__: List[str] = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase) if isinstance(lowercase , lowercase) else mask_token
super().__init__(
vocab_file=lowercase , tokenizer_file=lowercase , bos_token=lowercase , eos_token=lowercase , sep_token=lowercase , cls_token=lowercase , unk_token=lowercase , pad_token=lowercase , mask_token=lowercase , src_lang=lowercase , tgt_lang=lowercase , additional_special_tokens=lowercase , **lowercase , )
a__: Tuple = vocab_file
a__: Union[str, Any] = False if not self.vocab_file else True
a__: Union[str, Any] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens])
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens})
a__: int = {
lang_code: self.convert_tokens_to_ids(lowercase) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
a__: List[Any] = src_lang if src_lang is not None else 'en_XX'
a__: Tuple = self.convert_tokens_to_ids(self._src_lang)
a__: str = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
@property
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
return self._src_lang
@src_lang.setter
def lowerCamelCase_ ( self , lowercase) -> None:
'''simple docstring'''
a__: Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def lowerCamelCase_ ( self , lowercase , lowercase = None) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowerCamelCase_ ( self , lowercase , lowercase = None) -> List[int]:
'''simple docstring'''
a__: Any = [self.sep_token_id]
a__: List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , **lowercase) -> Union[str, Any]:
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')
a__: Union[str, Any] = src_lang
a__: Any = self(lowercase , add_special_tokens=lowercase , return_tensors=lowercase , **lowercase)
a__: str = self.convert_tokens_to_ids(lowercase)
a__: Any = tgt_lang_id
return inputs
def lowerCamelCase_ ( self , lowercase , lowercase = "en_XX" , lowercase = None , lowercase = "ro_RO" , **lowercase , ) -> BatchEncoding:
'''simple docstring'''
a__: Any = src_lang
a__: List[Any] = tgt_lang
return super().prepare_seqaseq_batch(lowercase , lowercase , **lowercase)
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang)
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def lowerCamelCase_ ( self , lowercase) -> None:
'''simple docstring'''
a__: int = self.convert_tokens_to_ids(lowercase)
a__: List[Any] = []
a__: List[str] = [self.eos_token_id, self.cur_lang_code]
a__: Dict = self.convert_ids_to_tokens(self.prefix_tokens)
a__: Any = self.convert_ids_to_tokens(self.suffix_tokens)
a__: int = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def lowerCamelCase_ ( self , lowercase) -> None:
'''simple docstring'''
a__: str = self.convert_tokens_to_ids(lowercase)
a__: List[Any] = []
a__: Dict = [self.eos_token_id, self.cur_lang_code]
a__: Any = self.convert_ids_to_tokens(self.prefix_tokens)
a__: Optional[Any] = self.convert_ids_to_tokens(self.suffix_tokens)
a__: str = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def lowerCamelCase_ ( self , lowercase , lowercase = None) -> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.')
if not os.path.isdir(lowercase):
logger.error(f'Vocabulary path ({save_directory}) should be a directory.')
return
a__: Any = os.path.join(
lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowercase):
copyfile(self.vocab_file , lowercase)
return (out_vocab_file,)
| 203 | 1 |
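A usage sketch for the language-code plumbing above, with the checkpoint id taken from the snippet's own maps (running it downloads the tokenizer, so it needs network access):

from transformers import MBartTokenizerFast

tok = MBartTokenizerFast.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
ids = tok("UN Chief Says There Is No Plan")["input_ids"]
# set_src_lang_special_tokens appends the language code after </s>:
print(tok.convert_ids_to_tokens(ids)[-2:])  # ['</s>', 'en_XX']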
import numpy as np
def sigmoid ( __lowerCamelCase ) -> np.ndarray:
return 1 / (1 + np.exp(-__lowerCamelCase ))
def sigmoid_linear_unit ( __lowerCamelCase ) -> np.ndarray:
return __lowerCamelCase * sigmoid(__lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 228 |
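A quick numerical check of the two activations above:

import numpy as np

x = np.array([-1.0, 0.0, 1.0])
s = 1 / (1 + np.exp(-x))
print(np.round(s, 4))      # [0.2689 0.5 0.7311]
print(np.round(x * s, 4))  # [-0.2689 0. 0.7311], the SiLU / "swish" values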
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
__UpperCamelCase : Optional[Any] = False
@skip_mps
class __lowerCAmelCase ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
UpperCamelCase__ = StableDiffusionAttendAndExcitePipeline
UpperCamelCase__ = False
UpperCamelCase__ = TEXT_TO_IMAGE_PARAMS
UpperCamelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''} )
UpperCamelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCamelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def lowerCamelCase__ ( cls :Union[str, Any] ):
'''simple docstring'''
super().setUpClass()
torch.use_deterministic_algorithms(__magic_name__ )
@classmethod
def lowerCamelCase__ ( cls :Union[str, Any] ):
'''simple docstring'''
super().tearDownClass()
torch.use_deterministic_algorithms(__magic_name__ )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)
        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16)
        pipe.to("cuda")
        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]
        image = pipe(
            prompt=prompt, token_indices=token_indices, guidance_scale=7.5, generator=generator, num_inference_steps=5, max_iter_to_alter=5, output_type="numpy", ).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy")
        assert np.abs((expected_image - image).max()) < 5e-1
| 228 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowerCamelCase_( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase__ : Dict = ["pixel_values"]
def __init__( self , lowerCamelCase__ = True , lowerCamelCase__ = None , lowerCamelCase__ = PILImageResampling.BICUBIC , lowerCamelCase__ = True , lowerCamelCase__ = None , lowerCamelCase__ = True , lowerCamelCase__ = 1 / 2_5_5 , lowerCamelCase__ = True , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = True , **lowerCamelCase__ , ):
super().__init__(**_snake_case )
_lowerCamelCase = size if size is not None else {"shortest_edge": 2_2_4}
_lowerCamelCase = get_size_dict(_snake_case , default_to_square=_snake_case )
_lowerCamelCase = crop_size if crop_size is not None else {"height": 2_2_4, "width": 2_2_4}
_lowerCamelCase = get_size_dict(_snake_case , default_to_square=_snake_case , param_name='''crop_size''' )
_lowerCamelCase = do_resize
_lowerCamelCase = size
_lowerCamelCase = resample
_lowerCamelCase = do_center_crop
_lowerCamelCase = crop_size
_lowerCamelCase = do_rescale
_lowerCamelCase = rescale_factor
_lowerCamelCase = do_normalize
_lowerCamelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
_lowerCamelCase = image_std if image_std is not None else OPENAI_CLIP_STD
_lowerCamelCase = do_convert_rgb
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = PILImageResampling.BICUBIC , lowerCamelCase__ = None , **lowerCamelCase__ , ):
_lowerCamelCase = get_size_dict(_snake_case , default_to_square=_snake_case )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
_lowerCamelCase = get_resize_output_image_size(_snake_case , size=size['''shortest_edge'''] , default_to_square=_snake_case )
return resize(_snake_case , size=_snake_case , resample=_snake_case , data_format=_snake_case , **_snake_case )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , **lowerCamelCase__ , ):
_lowerCamelCase = get_size_dict(_snake_case )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(_snake_case , size=(size['''height'''], size['''width''']) , data_format=_snake_case , **_snake_case )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , **lowerCamelCase__ , ):
return rescale(_snake_case , scale=_snake_case , data_format=_snake_case , **_snake_case )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , **lowerCamelCase__ , ):
return normalize(_snake_case , mean=_snake_case , std=_snake_case , data_format=_snake_case , **_snake_case )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = ChannelDimension.FIRST , **lowerCamelCase__ , ):
_lowerCamelCase = do_resize if do_resize is not None else self.do_resize
_lowerCamelCase = size if size is not None else self.size
_lowerCamelCase = get_size_dict(_snake_case , param_name='''size''' , default_to_square=_snake_case )
_lowerCamelCase = resample if resample is not None else self.resample
_lowerCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowerCamelCase = crop_size if crop_size is not None else self.crop_size
_lowerCamelCase = get_size_dict(_snake_case , param_name='''crop_size''' , default_to_square=_snake_case )
_lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale
_lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCamelCase = do_normalize if do_normalize is not None else self.do_normalize
_lowerCamelCase = image_mean if image_mean is not None else self.image_mean
_lowerCamelCase = image_std if image_std is not None else self.image_std
_lowerCamelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_lowerCamelCase = make_list_of_images(_snake_case )
if not valid_images(_snake_case ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_lowerCamelCase = [convert_to_rgb(_snake_case ) for image in images]
# All transformations expect numpy arrays.
_lowerCamelCase = [to_numpy_array(_snake_case ) for image in images]
if do_resize:
_lowerCamelCase = [self.resize(image=_snake_case , size=_snake_case , resample=_snake_case ) for image in images]
if do_center_crop:
_lowerCamelCase = [self.center_crop(image=_snake_case , size=_snake_case ) for image in images]
if do_rescale:
_lowerCamelCase = [self.rescale(image=_snake_case , scale=_snake_case ) for image in images]
if do_normalize:
_lowerCamelCase = [self.normalize(image=_snake_case , mean=_snake_case , std=_snake_case ) for image in images]
_lowerCamelCase = [to_channel_dimension_format(_snake_case , _snake_case ) for image in images]
_lowerCamelCase = {"pixel_values": images}
return BatchFeature(data=_snake_case , tensor_type=_snake_case )
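# The preprocess method above applies, in order: optional RGB conversion -> resize
# (shortest edge) -> center crop -> rescale by 1/255 -> normalize -> channel-first
# layout, returning a BatchFeature. A minimal standalone sketch of the normalize
# step (the mean/std values are the published OPENAI_CLIP constants; the input
# image is a hypothetical stand-in):
#
#     img = np.random.rand(224, 224, 3)  # HWC float, already rescaled to [0, 1]
#     mean = np.array([0.48145466, 0.4578275, 0.40821073])
#     std = np.array([0.26862954, 0.26130258, 0.27577711])
#     normalized = (img - mean) / std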
| 351 |
"""simple docstring"""
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend('aer_simulator')
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F"""Total count for various states are: {single_qubit_measure(1, 1)}""")
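# Interpretation sketch (an assumption, not from the source): the circuit above
# never rotates the qubit out of |0>, so all 1000 shots should land in one bin,
# e.g. {'0': 1000}. Inserting circuit.h(0) before the measurement would instead
# give a roughly 50/50 split between '0' and '1'.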
| 73 | 0 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _A :
@staticmethod
def A__ ( *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
pass
@is_pipeline_test
@require_torch
@require_vision
class _A ( unittest.TestCase ):
snake_case__ : Union[str, Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
lowercase = pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""" )
lowercase = [
{
"""image""": Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""question""": """How many cats are there?""",
},
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""question""": """How many cats are there?""",
},
]
return vqa_pipeline, examples
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
lowercase = vqa_pipeline(__lowerCAmelCase , top_k=1 )
self.assertEqual(
__lowerCAmelCase , [
[{"""score""": ANY(__lowerCAmelCase ), """answer""": ANY(__lowerCAmelCase )}],
[{"""score""": ANY(__lowerCAmelCase ), """answer""": ANY(__lowerCAmelCase )}],
] , )
@require_torch
def A__ ( self ):
"""simple docstring"""
lowercase = pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""" )
lowercase = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
lowercase = """How many cats are there?"""
lowercase = vqa_pipeline(image=__lowerCAmelCase , question="""How many cats are there?""" , top_k=2 )
self.assertEqual(
__lowerCAmelCase , [{"""score""": ANY(__lowerCAmelCase ), """answer""": ANY(__lowerCAmelCase )}, {"""score""": ANY(__lowerCAmelCase ), """answer""": ANY(__lowerCAmelCase )}] )
lowercase = vqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
__lowerCAmelCase , [{"""score""": ANY(__lowerCAmelCase ), """answer""": ANY(__lowerCAmelCase )}, {"""score""": ANY(__lowerCAmelCase ), """answer""": ANY(__lowerCAmelCase )}] )
@slow
@require_torch
def A__ ( self ):
"""simple docstring"""
lowercase = pipeline("""visual-question-answering""" , model="""dandelin/vilt-b32-finetuned-vqa""" )
lowercase = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
lowercase = """How many cats are there?"""
lowercase = vqa_pipeline(image=__lowerCAmelCase , question=__lowerCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCAmelCase , decimals=4 ) , [{"""score""": 0.8_7_9_9, """answer""": """2"""}, {"""score""": 0.2_9_6, """answer""": """1"""}] )
lowercase = vqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCAmelCase , decimals=4 ) , [{"""score""": 0.8_7_9_9, """answer""": """2"""}, {"""score""": 0.2_9_6, """answer""": """1"""}] )
lowercase = vqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCAmelCase , decimals=4 ) , [[{"""score""": 0.8_7_9_9, """answer""": """2"""}, {"""score""": 0.2_9_6, """answer""": """1"""}]] * 2 , )
@require_tf
@unittest.skip("""Visual question answering not implemented in TF""" )
def A__ ( self ):
"""simple docstring"""
pass
| 197 | """simple docstring"""
from ..utils import DummyObject, requires_backends
class _A ( metaclass=lowerCAmelCase ):
snake_case__ : Optional[int] = ['torch', 'torchsde']
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ["""torch""", """torchsde"""] )
@classmethod
def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ["""torch""", """torchsde"""] )
@classmethod
def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ["""torch""", """torchsde"""] )
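# A note on the pattern above (an assumption about intent, not from the source):
# requires_backends raises an informative error when "torch" or "torchsde" is
# missing, so this dummy class lets the package import cleanly without those
# extras and fails loudly only when the guarded class is actually instantiated
# or reached through the guarded classmethods.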
| 197 | 1 |
'''simple docstring'''
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """Return tanh(x) computed via its sigmoid form: 2 / (1 + e^(-2x)) - 1."""
    return (2 / (1 + np.exp(-2 * vector))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 264 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase : List[Any] = logging.get_logger(__name__)
_lowercase : Optional[Any] = {
"google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = '''vit'''
    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, encoder_stride=16, **kwargs, ):
        """simple docstring"""
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = version.parse('''1.11''' )
@property
def _snake_case ( self ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _snake_case ( self ):
"""simple docstring"""
return 1E-4
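    # Quick derivation from the defaults above (a sketch, not from the source):
    # with image_size=224 and patch_size=16 the encoder sees (224 / 16) ** 2 = 196
    # patches, plus one [CLS] token, for a sequence length of 197.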
| 264 | 1 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
_lowerCamelCase =get_tests_dir("fixtures")
class a_ ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : Optional[int] ):
# A mock response for an HTTP head request to emulate server down
SCREAMING_SNAKE_CASE =mock.Mock()
SCREAMING_SNAKE_CASE =500
SCREAMING_SNAKE_CASE ={}
SCREAMING_SNAKE_CASE =HTTPError
SCREAMING_SNAKE_CASE ={}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE =WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' ,return_value=snake_case ) as mock_head:
SCREAMING_SNAKE_CASE =WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
# This check we did call the fake head request
mock_head.assert_called()
def _lowerCAmelCase ( self : str ):
# This test is for deprecated behavior and can be removed in v5
SCREAMING_SNAKE_CASE =WavaVecaFeatureExtractor.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json' )
@is_staging_test
class a_ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def _lowerCAmelCase ( cls : Optional[int] ):
SCREAMING_SNAKE_CASE =TOKEN
HfFolder.save_token(snake_case )
@classmethod
def _lowerCAmelCase ( cls : List[Any] ):
try:
delete_repo(token=cls._token ,repo_id='test-feature-extractor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='valid_org/test-feature-extractor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='test-dynamic-feature-extractor' )
except HTTPError:
pass
def _lowerCAmelCase ( self : str ):
SCREAMING_SNAKE_CASE =WavaVecaFeatureExtractor.from_pretrained(snake_case )
feature_extractor.push_to_hub('test-feature-extractor' ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =WavaVecaFeatureExtractor.from_pretrained(f'{USER}/test-feature-extractor' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(snake_case ,getattr(snake_case ,snake_case ) )
# Reset repo
delete_repo(token=self._token ,repo_id='test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
snake_case ,repo_id='test-feature-extractor' ,push_to_hub=snake_case ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =WavaVecaFeatureExtractor.from_pretrained(f'{USER}/test-feature-extractor' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(snake_case ,getattr(snake_case ,snake_case ) )
def _lowerCAmelCase ( self : Dict ):
SCREAMING_SNAKE_CASE =WavaVecaFeatureExtractor.from_pretrained(snake_case )
feature_extractor.push_to_hub('valid_org/test-feature-extractor' ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(snake_case ,getattr(snake_case ,snake_case ) )
# Reset repo
delete_repo(token=self._token ,repo_id='valid_org/test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
snake_case ,repo_id='valid_org/test-feature-extractor-org' ,push_to_hub=snake_case ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor-org' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(snake_case ,getattr(snake_case ,snake_case ) )
def _lowerCAmelCase ( self : Dict ):
CustomFeatureExtractor.register_for_auto_class()
SCREAMING_SNAKE_CASE =CustomFeatureExtractor.from_pretrained(snake_case )
feature_extractor.push_to_hub('test-dynamic-feature-extractor' ,use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map ,{'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor'} ,)
SCREAMING_SNAKE_CASE =AutoFeatureExtractor.from_pretrained(
f'{USER}/test-dynamic-feature-extractor' ,trust_remote_code=snake_case )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ ,'CustomFeatureExtractor' )
| 334 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self : Tuple ,snake_case : Optional[int] ,snake_case : Dict=13 ,snake_case : str=7 ,snake_case : Dict=True ,snake_case : List[Any]=True ,snake_case : Dict=False ,snake_case : int=True ,snake_case : Dict=99 ,snake_case : int=32 ,snake_case : List[str]=5 ,snake_case : Optional[Any]=4 ,snake_case : Tuple=64 ,snake_case : List[Any]="gelu" ,snake_case : str=0.1 ,snake_case : str=0.1 ,snake_case : List[str]=512 ,snake_case : List[str]=16 ,snake_case : str=2 ,snake_case : Dict=0.02 ,snake_case : Optional[int]=3 ,snake_case : int=4 ,snake_case : Any=None ,snake_case : Union[str, Any]=2 ,snake_case : List[Any]=2 ,snake_case : Optional[int]=2 ,snake_case : Dict=2 ,snake_case : List[str]=4 ,snake_case : int=1 ,):
SCREAMING_SNAKE_CASE =parent
SCREAMING_SNAKE_CASE =batch_size
SCREAMING_SNAKE_CASE =seq_length
SCREAMING_SNAKE_CASE =is_training
SCREAMING_SNAKE_CASE =use_input_mask
SCREAMING_SNAKE_CASE =use_token_type_ids
SCREAMING_SNAKE_CASE =use_labels
SCREAMING_SNAKE_CASE =vocab_size
SCREAMING_SNAKE_CASE =hidden_size
SCREAMING_SNAKE_CASE =num_hidden_layers
SCREAMING_SNAKE_CASE =num_attention_heads
SCREAMING_SNAKE_CASE =intermediate_size
SCREAMING_SNAKE_CASE =hidden_act
SCREAMING_SNAKE_CASE =hidden_dropout_prob
SCREAMING_SNAKE_CASE =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE =max_position_embeddings
SCREAMING_SNAKE_CASE =type_vocab_size
SCREAMING_SNAKE_CASE =type_sequence_label_size
SCREAMING_SNAKE_CASE =initializer_range
SCREAMING_SNAKE_CASE =num_labels
SCREAMING_SNAKE_CASE =num_choices
SCREAMING_SNAKE_CASE =scope
SCREAMING_SNAKE_CASE =q_groups
SCREAMING_SNAKE_CASE =k_groups
SCREAMING_SNAKE_CASE =v_groups
SCREAMING_SNAKE_CASE =post_attention_groups
SCREAMING_SNAKE_CASE =intermediate_groups
SCREAMING_SNAKE_CASE =output_groups
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
SCREAMING_SNAKE_CASE =None
if self.use_input_mask:
SCREAMING_SNAKE_CASE =random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE =None
SCREAMING_SNAKE_CASE =None
SCREAMING_SNAKE_CASE =None
if self.use_labels:
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] ,self.type_sequence_label_size )
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] ,self.num_choices )
SCREAMING_SNAKE_CASE =self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCAmelCase ( self : Optional[int] ):
return SqueezeBertConfig(
embedding_size=self.hidden_size ,vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,attention_probs_dropout_prob=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,q_groups=self.q_groups ,k_groups=self.k_groups ,v_groups=self.v_groups ,post_attention_groups=self.post_attention_groups ,intermediate_groups=self.intermediate_groups ,output_groups=self.output_groups ,)
def _lowerCAmelCase ( self : Dict ,snake_case : List[str] ,snake_case : Optional[Any] ,snake_case : List[str] ,snake_case : List[Any] ,snake_case : str ,snake_case : Union[str, Any] ):
SCREAMING_SNAKE_CASE =SqueezeBertModel(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case ,snake_case )
SCREAMING_SNAKE_CASE =model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self : Optional[int] ,snake_case : Optional[int] ,snake_case : Union[str, Any] ,snake_case : List[Any] ,snake_case : int ,snake_case : Any ,snake_case : Tuple ):
SCREAMING_SNAKE_CASE =SqueezeBertForMaskedLM(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case ,attention_mask=snake_case ,labels=snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCAmelCase ( self : Tuple ,snake_case : Union[str, Any] ,snake_case : Any ,snake_case : List[str] ,snake_case : List[Any] ,snake_case : Dict ,snake_case : Optional[Any] ):
SCREAMING_SNAKE_CASE =SqueezeBertForQuestionAnswering(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(
snake_case ,attention_mask=snake_case ,start_positions=snake_case ,end_positions=snake_case )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _lowerCAmelCase ( self : Optional[int] ,snake_case : Tuple ,snake_case : List[str] ,snake_case : List[str] ,snake_case : Any ,snake_case : Tuple ,snake_case : str ):
SCREAMING_SNAKE_CASE =self.num_labels
SCREAMING_SNAKE_CASE =SqueezeBertForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case ,attention_mask=snake_case ,labels=snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _lowerCAmelCase ( self : Optional[Any] ,snake_case : List[str] ,snake_case : List[str] ,snake_case : Tuple ,snake_case : Dict ,snake_case : str ,snake_case : Tuple ):
SCREAMING_SNAKE_CASE =self.num_labels
SCREAMING_SNAKE_CASE =SqueezeBertForTokenClassification(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case ,attention_mask=snake_case ,labels=snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _lowerCAmelCase ( self : List[str] ,snake_case : Dict ,snake_case : str ,snake_case : Union[str, Any] ,snake_case : Union[str, Any] ,snake_case : Any ,snake_case : Union[str, Any] ):
SCREAMING_SNAKE_CASE =self.num_choices
SCREAMING_SNAKE_CASE =SqueezeBertForMultipleChoice(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
SCREAMING_SNAKE_CASE =input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
SCREAMING_SNAKE_CASE =model(
snake_case ,attention_mask=snake_case ,labels=snake_case ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def _lowerCAmelCase ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) =config_and_inputs
SCREAMING_SNAKE_CASE ={'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a_ ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
__UpperCAmelCase = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = True
__UpperCAmelCase = False
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =SqueezeBertModelTester(self )
SCREAMING_SNAKE_CASE =ConfigTester(self ,config_class=snake_case ,dim=37 )
def _lowerCAmelCase ( self : List[str] ):
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*snake_case )
def _lowerCAmelCase ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*snake_case )
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*snake_case )
def _lowerCAmelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*snake_case )
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*snake_case )
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*snake_case )
@slow
def _lowerCAmelCase ( self : str ):
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE =SqueezeBertModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
@require_sentencepiece
@require_tokenizers
@require_torch
class a_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowerCAmelCase ( self : Any ):
SCREAMING_SNAKE_CASE =SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli' )
SCREAMING_SNAKE_CASE =torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]] )
SCREAMING_SNAKE_CASE =model(snake_case )[0]
SCREAMING_SNAKE_CASE =torch.Size((1, 3) )
self.assertEqual(output.shape ,snake_case )
SCREAMING_SNAKE_CASE =torch.tensor([[0.6_401, -0.0_349, -0.6_041]] )
self.assertTrue(torch.allclose(snake_case ,snake_case ,atol=1e-4 ) )
| 334 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__a: Any = {
"""configuration_wav2vec2""": ["""WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Wav2Vec2Config"""],
"""feature_extraction_wav2vec2""": ["""Wav2Vec2FeatureExtractor"""],
"""processing_wav2vec2""": ["""Wav2Vec2Processor"""],
"""tokenization_wav2vec2""": ["""Wav2Vec2CTCTokenizer""", """Wav2Vec2Tokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a: Tuple = [
"""WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Wav2Vec2ForAudioFrameClassification""",
"""Wav2Vec2ForCTC""",
"""Wav2Vec2ForMaskedLM""",
"""Wav2Vec2ForPreTraining""",
"""Wav2Vec2ForSequenceClassification""",
"""Wav2Vec2ForXVector""",
"""Wav2Vec2Model""",
"""Wav2Vec2PreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a: List[str] = [
"""TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFWav2Vec2ForCTC""",
"""TFWav2Vec2Model""",
"""TFWav2Vec2PreTrainedModel""",
"""TFWav2Vec2ForSequenceClassification""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a: List[str] = [
"""FlaxWav2Vec2ForCTC""",
"""FlaxWav2Vec2ForPreTraining""",
"""FlaxWav2Vec2Model""",
"""FlaxWav2Vec2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
__a: int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
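# Note: _LazyModule defers the framework-specific imports declared above until an
# attribute is first accessed, so importing this package stays cheap; the
# TYPE_CHECKING branch mirrors the same structure for static type checkers only.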
| 350 | '''simple docstring'''
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UpperCAmelCase ( a__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine", ):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
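# The function above is the cosine noise schedule of Nichol & Dhariwal; restated
# in LaTeX (a restatement of the code, not new material):
#   \bar{\alpha}(t) = \cos^2\!\left(\frac{t + 0.008}{1.008} \cdot \frac{\pi}{2}\right),
#   \qquad
#   \beta_i = \min\!\left(1 - \frac{\bar{\alpha}(t_{i+1})}{\bar{\alpha}(t_i)},\; \beta_{\max}\right)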
class UpperCAmelCase ( a__ , a__ ):
'''simple docstring'''
@register_to_config
def __init__( self , __lowerCAmelCase = 1000 , __lowerCAmelCase = "fixed_small_log" , __lowerCAmelCase = True , __lowerCAmelCase = 1.0 , __lowerCAmelCase = "epsilon" , __lowerCAmelCase = "squaredcos_cap_v2" , ) -> Optional[int]:
if beta_schedule != "squaredcos_cap_v2":
raise ValueError('''UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'''' )
lowercase__ : Union[str, Any] = betas_for_alpha_bar(__lowerCAmelCase )
lowercase__ : List[Any] = 1.0 - self.betas
lowercase__ : int = torch.cumprod(self.alphas , dim=0 )
lowercase__ : str = torch.tensor(1.0 )
# standard deviation of the initial noise distribution
lowercase__ : Optional[Any] = 1.0
# setable values
lowercase__ : Optional[Any] = None
lowercase__ : List[Any] = torch.from_numpy(np.arange(0 , __lowerCAmelCase )[::-1].copy() )
lowercase__ : Tuple = variance_type
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase = None ) -> torch.FloatTensor:
return sample
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase = None ) -> Optional[int]:
lowercase__ : List[str] = num_inference_steps
lowercase__ : List[str] = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
lowercase__ : List[str] = (np.arange(0 , __lowerCAmelCase ) * step_ratio).round()[::-1].copy().astype(np.intaa )
lowercase__ : str = torch.from_numpy(__lowerCAmelCase ).to(__lowerCAmelCase )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None ) -> Tuple:
if prev_timestep is None:
lowercase__ : Any = t - 1
lowercase__ : Any = self.alphas_cumprod[t]
lowercase__ : List[Any] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
lowercase__ : str = 1 - alpha_prod_t
lowercase__ : int = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
lowercase__ : Tuple = self.betas[t]
else:
lowercase__ : Dict = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowercase__ : Any = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
lowercase__ : Union[str, Any] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
lowercase__ : int = torch.log(torch.clamp(__lowerCAmelCase , min=1E-20 ) )
lowercase__ : Dict = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
lowercase__ : Union[str, Any] = variance.log()
lowercase__ : Optional[int] = beta.log()
lowercase__ : Tuple = (predicted_variance + 1) / 2
lowercase__ : Dict = frac * max_log + (1 - frac) * min_log
return variance
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase=None , __lowerCAmelCase = True , ) -> Union[UnCLIPSchedulerOutput, Tuple]:
lowercase__ : Tuple = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
lowercase__ , lowercase__ : str = torch.split(__lowerCAmelCase , sample.shape[1] , dim=1 )
else:
lowercase__ : Dict = None
# 1. compute alphas, betas
if prev_timestep is None:
lowercase__ : int = t - 1
lowercase__ : Optional[int] = self.alphas_cumprod[t]
lowercase__ : Optional[int] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
lowercase__ : Optional[int] = 1 - alpha_prod_t
lowercase__ : List[Any] = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
lowercase__ : Optional[int] = self.betas[t]
lowercase__ : Optional[Any] = self.alphas[t]
else:
lowercase__ : Any = 1 - alpha_prod_t / alpha_prod_t_prev
lowercase__ : Optional[int] = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowercase__ : int = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowercase__ : Dict = model_output
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"""
''' for the UnCLIPScheduler.''' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowercase__ : List[Any] = torch.clamp(
__lowerCAmelCase , -self.config.clip_sample_range , self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase__ : Optional[Any] = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
lowercase__ : Tuple = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase__ : List[Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
lowercase__ : List[Any] = 0
if t > 0:
lowercase__ : Dict = randn_tensor(
model_output.shape , dtype=model_output.dtype , generator=__lowerCAmelCase , device=model_output.device )
lowercase__ : Union[str, Any] = self._get_variance(
__lowerCAmelCase , predicted_variance=__lowerCAmelCase , prev_timestep=__lowerCAmelCase , )
if self.variance_type == "fixed_small_log":
lowercase__ : List[Any] = variance
elif self.variance_type == "learned_range":
lowercase__ : int = (0.5 * variance).exp()
else:
raise ValueError(
F"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"""
''' for the UnCLIPScheduler.''' )
lowercase__ : List[str] = variance * variance_noise
lowercase__ : Union[str, Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=__lowerCAmelCase , pred_original_sample=__lowerCAmelCase )
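    # In LaTeX, steps 4-5 above compute the DDPM posterior mean (eq. (7) of
    # https://arxiv.org/pdf/2006.11239.pdf):
    #   \tilde{\mu}_t(x_t, x_0)
    #     = \frac{\sqrt{\bar{\alpha}_{t-1}}\,\beta_t}{1 - \bar{\alpha}_t}\, x_0
    #     + \frac{\sqrt{\alpha_t}\,(1 - \bar{\alpha}_{t-1})}{1 - \bar{\alpha}_t}\, x_t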
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ) -> torch.FloatTensor:
# Make sure alphas_cumprod and timestep have same device and dtype as original_samples
lowercase__ : Tuple = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
lowercase__ : str = timesteps.to(original_samples.device )
lowercase__ : Union[str, Any] = alphas_cumprod[timesteps] ** 0.5
lowercase__ : List[str] = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
lowercase__ : List[str] = sqrt_alpha_prod.unsqueeze(-1 )
lowercase__ : int = (1 - alphas_cumprod[timesteps]) ** 0.5
lowercase__ : List[Any] = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
lowercase__ : int = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
lowercase__ : Optional[int] = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
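    # add_noise implements the closed-form DDPM forward process q(x_t | x_0):
    #   x_t = \sqrt{\bar{\alpha}_t}\, x_0 + \sqrt{1 - \bar{\alpha}_t}\, \epsilon
    # with the unsqueeze loops broadcasting the per-timestep scalars over the
    # sample's trailing dimensions.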
| 214 | 0 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class UpperCamelCase__ :
'''simple docstring'''
@staticmethod
def _lowercase ( *UpperCamelCase__ , **UpperCamelCase__ ) -> List[Any]:
pass
def A ( _SCREAMING_SNAKE_CASE ) -> Dict:
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
SCREAMING_SNAKE_CASE__ : Dict = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ : Optional[Any] = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> str:
lowerCamelCase : str = pipeline(
"document-question-answering" , model=UpperCamelCase__ , tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase : Optional[Any] = INVOICE_URL
lowerCamelCase : Optional[int] = list(zip(*apply_tesseract(load_image(UpperCamelCase__ ) , UpperCamelCase__ , "" ) ) )
lowerCamelCase : Optional[Any] = "What is the placebo?"
lowerCamelCase : int = [
{
"image": load_image(UpperCamelCase__ ),
"question": question,
},
{
"image": image,
"question": question,
},
{
"image": image,
"question": question,
"word_boxes": word_boxes,
},
]
return dqa_pipeline, examples
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]:
lowerCamelCase : Dict = dqa_pipeline(UpperCamelCase__ , top_k=2 )
self.assertEqual(
UpperCamelCase__ , [
[
{"score": ANY(UpperCamelCase__ ), "answer": ANY(UpperCamelCase__ ), "start": ANY(UpperCamelCase__ ), "end": ANY(UpperCamelCase__ )},
{"score": ANY(UpperCamelCase__ ), "answer": ANY(UpperCamelCase__ ), "start": ANY(UpperCamelCase__ ), "end": ANY(UpperCamelCase__ )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def _lowercase ( self ) -> Union[str, Any]:
lowerCamelCase : Any = pipeline("document-question-answering" , model="hf-internal-testing/tiny-random-layoutlmv2" )
lowerCamelCase : Union[str, Any] = INVOICE_URL
lowerCamelCase : Tuple = "How many cats are there?"
lowerCamelCase : Dict = [
{"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
{"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
]
lowerCamelCase : List[Any] = dqa_pipeline(image=UpperCamelCase__ , question=UpperCamelCase__ , top_k=2 )
self.assertEqual(nested_simplify(UpperCamelCase__ , decimals=4 ) , UpperCamelCase__ )
lowerCamelCase : int = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(nested_simplify(UpperCamelCase__ , decimals=4 ) , UpperCamelCase__ )
        # No text is detected in this image, so LayoutLMv2 is expected to return
        # an empty answer.
lowerCamelCase : str = "./tests/fixtures/tests_samples/COCO/000000039769.png"
lowerCamelCase : List[Any] = dqa_pipeline(image=UpperCamelCase__ , question=UpperCamelCase__ , top_k=2 )
self.assertEqual(UpperCamelCase__ , [] )
        # We can optionally pass the words and bounding boxes directly
lowerCamelCase : Dict = "./tests/fixtures/tests_samples/COCO/000000039769.png"
lowerCamelCase : Optional[int] = []
lowerCamelCase : List[Any] = []
lowerCamelCase : Optional[Any] = dqa_pipeline(image=UpperCamelCase__ , question=UpperCamelCase__ , words=UpperCamelCase__ , boxes=UpperCamelCase__ , top_k=2 )
self.assertEqual(UpperCamelCase__ , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def _lowercase ( self ) -> Any:
lowerCamelCase : Tuple = pipeline(
"document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , )
lowerCamelCase : List[str] = INVOICE_URL
lowerCamelCase : str = "What is the invoice number?"
lowerCamelCase : int = dqa_pipeline(image=UpperCamelCase__ , question=UpperCamelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
] , )
lowerCamelCase : Tuple = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
] , )
lowerCamelCase : Tuple = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
[
{"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def _lowercase ( self ) -> Any:
lowerCamelCase : Optional[int] = pipeline(
"document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , max_seq_len=50 , )
lowerCamelCase : Dict = INVOICE_URL
lowerCamelCase : Optional[Any] = "What is the invoice number?"
lowerCamelCase : Tuple = dqa_pipeline(image=UpperCamelCase__ , question=UpperCamelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
] , )
lowerCamelCase : Optional[Any] = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
] , )
lowerCamelCase : int = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
[
{"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def _lowercase ( self ) -> Union[str, Any]:
lowerCamelCase : str = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=UpperCamelCase__ )
lowerCamelCase : Dict = pipeline(
"document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=UpperCamelCase__ , revision="3dc6de3" , )
lowerCamelCase : Dict = INVOICE_URL
lowerCamelCase : Any = "What is the invoice number?"
lowerCamelCase : Union[str, Any] = dqa_pipeline(image=UpperCamelCase__ , question=UpperCamelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
] , )
lowerCamelCase : Dict = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
] , )
lowerCamelCase : Optional[int] = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
[
{"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
]
]
* 2 , )
lowerCamelCase : int = list(zip(*apply_tesseract(load_image(UpperCamelCase__ ) , UpperCamelCase__ , "" ) ) )
# This model should also work if `image` is set to None
lowerCamelCase : List[str] = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def _lowercase ( self ) -> List[Any]:
lowerCamelCase : Any = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=UpperCamelCase__ )
lowerCamelCase : Dict = pipeline(
"document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=UpperCamelCase__ , revision="3dc6de3" , max_seq_len=50 , )
lowerCamelCase : List[str] = INVOICE_URL
lowerCamelCase : Any = "What is the invoice number?"
lowerCamelCase : str = dqa_pipeline(image=UpperCamelCase__ , question=UpperCamelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
] , )
lowerCamelCase : Dict = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
[
{"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2 , )
lowerCamelCase : Tuple = list(zip(*apply_tesseract(load_image(UpperCamelCase__ ) , UpperCamelCase__ , "" ) ) )
# This model should also work if `image` is set to None
lowerCamelCase : Optional[Any] = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
] , )
@slow
@require_torch
def _lowercase ( self ) -> str:
lowerCamelCase : Optional[int] = pipeline(
"document-question-answering" , model="naver-clova-ix/donut-base-finetuned-docvqa" , tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ) , feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa" , )
lowerCamelCase : str = INVOICE_URL
lowerCamelCase : Any = "What is the invoice number?"
lowerCamelCase : Tuple = dqa_pipeline(image=UpperCamelCase__ , question=UpperCamelCase__ , top_k=2 )
self.assertEqual(nested_simplify(UpperCamelCase__ , decimals=4 ) , [{"answer": "us-001"}] )
@require_tf
@unittest.skip("Document question answering not implemented in TF" )
def _lowercase ( self ) -> Tuple:
pass
| 48 |
"""simple docstring"""
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(taken: int = 20) -> str:
    total = math.comb(NUM_BALLS, taken)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, taken)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f'{result:.9f}'
if __name__ == "__main__":
print(solution(20))
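    # Worked check (follows from the constants above): total = C(70, 20) and
    # missing_colour = C(60, 20), so the expected number of distinct colours is
    # 7 * (1 - C(60, 20) / C(70, 20)) ≈ 6.818741802, the published answer to
    # Project Euler 493.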
| 246 | 0 |
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    return (2 / (1 + np.exp(-2 * vector))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 352 |
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32
        )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32)
        )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device)
        )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32
        )
        torch.manual_seed(0)

        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32)
        )
        controlnet1.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32)
        )
        controlnet2.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        controlnet = MultiControlNetModel([controlnet1, controlnet2])
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device)
            ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device)
            ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
        )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        ).resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
        ).resize((512, 512))

        output = pipe(
            prompt, image, control_image=control_image, generator=generator, output_type="np", num_inference_steps=50, strength=0.6
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy"
        )
        assert np.abs(expected_image - image).max() < 9e-2
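# --- Added usage sketch (not from the original test file). The slow test above
# exercises the public API roughly as follows; the image URLs are placeholders.
# (Running this needs a GPU and network access, so it is left commented out.)
#
# controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
# pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
#     "runwayml/stable-diffusion-v1-5", controlnet=controlnet, safety_checker=None
# )
# pipe.enable_model_cpu_offload()
# init_image = load_image("https://example.com/photo.png").resize((512, 512))
# canny_map = load_image("https://example.com/canny.png").resize((512, 512))
# result = pipe(
#     "a prompt", init_image, control_image=canny_map, num_inference_steps=30, strength=0.6
# ).images[0]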
| 177 | 0 |
""" FNet model configuration"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
    "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
    # See all FNet models at https://huggingface.co/models?filter=fnet
}


class FNetConfig(PretrainedConfig):
    model_type = "fnet"

    def __init__(self, vocab_size=32000, hidden_size=768, num_hidden_layers=12, intermediate_size=3072, hidden_act="gelu_new", hidden_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=4, initializer_range=0.02, layer_norm_eps=1e-12, use_tpu_fourier_optimizations=False, tpu_short_seq_length=512, pad_token_id=3, bos_token_id=1, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
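# --- Added usage sketch (not from the original file): building a randomly
# initialised model from this configuration. FNetModel is assumed to be
# importable from the top-level transformers package.
#
# from transformers import FNetModel
#
# configuration = FNetConfig()
# model = FNetModel(configuration)
# assert model.config.hidden_size == 768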
| 258 |
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : list[list[int]] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : set ):
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ), len(grid[0] )
if (
min(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
_UpperCAmelCase = 0
count += depth_first_search(_SCREAMING_SNAKE_CASE , row + 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
count += depth_first_search(_SCREAMING_SNAKE_CASE , row - 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
count += depth_first_search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , col + 1 , _SCREAMING_SNAKE_CASE )
count += depth_first_search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , col - 1 , _SCREAMING_SNAKE_CASE )
visit.remove((row, col) )
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
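# --- Added usage example (not part of the original file): count the simple
# paths through a small maze, where 1 marks a blocked cell.
if __name__ == "__main__":
    maze = [
        [0, 0, 0, 0],
        [1, 1, 0, 0],
        [0, 0, 0, 1],
        [0, 1, 0, 0],
    ]
    print(depth_first_search(maze, 0, 0, set()))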
| 260 | 0 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image

from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs

img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
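# --- Added reference sketch (not from the original file). For orientation,
# `convert_to_negative`, exercised by the first test above, amounts to
# inverting 8-bit pixel intensities:
def _negative_sketch(image: np.ndarray) -> np.ndarray:
    # assumes an unsigned 8-bit image with values in [0, 255]
    return 255 - image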
| 357 |
import argparse
import re

from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints

from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging


logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
"""/attention/""": """/0/SelfAttention/""",
"""/self_attention/""": """/0/SelfAttention/""",
"""/encoder_decoder_attention/""": """/1/EncDecAttention/""",
"""value""": """v""",
"""query""": """q""",
"""key""": """k""",
"""out""": """o""",
"""pre_self_attention_layer_norm""": """0/layer_norm""",
"""pre_cross_attention_layer_norm""": """1/layer_norm""",
"""pre_attention_layer_norm""": """0/layer_norm""", # previously 1, but seems wrong
"""token_embedder""": """shared""",
"""encoder_norm""": """final_layer_norm""",
"""decoder_norm""": """final_layer_norm""",
"""relpos_bias/rel_embedding""": """block/0/layer/0/SelfAttention/relative_attention_bias/weight""",
"""router/router_weights/w/""": """router/classifier/""",
"""roer/roer_weights/w/""": """router/classifier/""",
"""logits_dense""": """lm_head""",
}
def rename_keys(s_dict):
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
    # the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        layer_to_block_of_layer = r"(encoder|decoder)\/"
        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")
            s_dict.pop(key)

    return s_dict
GIN_TO_CONFIG_MAPPING = {
"""NUM_ENCODER_LAYERS""": """num_layers""",
"""NUM_DECODER_LAYERS""": """num_decoder_layers""",
"""NUM_HEADS""": """num_heads""",
"""HEAD_DIM""": """d_kv""",
"""EMBED_DIM""": """d_model""",
"""MLP_DIM""": """d_ff""",
"""NUM_SELECTED_EXPERTS""": """num_selected_experts""",
"""NUM_ENCODER_SPARSE_LAYERS""": """num_sparse_encoder_layers""",
"""NUM_DECODER_SPARSE_LAYERS""": """num_sparse_decoder_layers""",
"""dense.MlpBlock.activations""": """feed_forward_proj""",
}
def convert_gin_to_config(gin_file, num_experts):
    # Convert a google style config to the hugging face format
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
def convert_flax_checkpoint_to_pytorch(flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8):
    # Initialise PyTorch model
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"""
""" model architecture. If not provided, a `gin_file` has to be provided."""
),
)
parser.add_argument(
"""--gin_file""",
default=None,
type=str,
required=False,
help="""Path to the gin config file. If not provided, a `config_file` has to be passed """,
)
parser.add_argument(
"""--config_name""", default=None, type=str, required=False, help="""Config name of SwitchTransformers model."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output pytorch model."""
)
parser.add_argument("""--num_experts""", default=8, type=int, required=False, help="""Number of experts""")
    args = parser.parse_args()
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
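# --- Added invocation sketch (not part of the original script): the same
# conversion driven from Python instead of the CLI. Every path below is a
# placeholder assumption.
#
# convert_flax_checkpoint_to_pytorch(
#     "/path/to/t5x/checkpoint",
#     config_file=None,
#     gin_file="/path/to/model.gin",
#     pytorch_dump_path="./switch-base-8",
#     num_experts=8,
# )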
| 165 | 0 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True, use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=12, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, summary_type="last", use_proj=None, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None  # kept defined even when use_labels is False
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj
        )
    def create_and_check_flaubert_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, p_mask=input_mask
        )
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
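# --- Added usage sketch (not from the original test file). The integration
# test above reduces to this forward pass (commented out to avoid a model
# download on import):
#
# model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
# input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
# with torch.no_grad():
#     last_hidden_state = model(input_ids)[0]  # shape (1, 11, 768)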
| 220 |
import math


class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
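# --- Added note (not part of the original file). The implementation above
# never initialises dp[i][i] = 0, so a node's distance to itself stays at
# math.inf; callers that need the conventional Floyd-Warshall behaviour can
# zero the diagonal before running the algorithm:
#
# for i in range(graph.n):
#     graph.dp[i][i] = 0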
| 342 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import (
    AudioDiffusionPipeline,
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    DiffusionPipeline,
    Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            sample_size=(32, 64), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "AttnUpBlock2D")
        )
        return model

    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), cross_attention_dim=10
        )
        return model

    @property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64), in_channels=1, out_channels=1, latent_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"), up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D")
        )
        unet = UNet2DModel(
            sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "AttnUpBlock2D")
        )
        return vqvae, unet
    @slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1], y_res=self.dummy_unet.config.sample_size[0]
        )
        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1], y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0]
        )
        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]

        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
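# --- Added usage sketch (not from the original test file), mirroring the
# integration test above (commented out: it downloads a pretrained pipeline):
#
# pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
# output = pipe(generator=torch.Generator().manual_seed(42))
# audio, image = output.audios[0], output.images[0]  # waveform + mel-spectrogram image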
| 241 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowercase__ ( metaclass=SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = ['''flax''']
def __init__( self : Tuple , *_UpperCAmelCase : str , **_UpperCAmelCase : Any ) -> List[str]:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowercase__ ( cls : List[Any] , *_UpperCAmelCase : Dict , **_UpperCAmelCase : Tuple ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowercase__ ( cls : List[Any] , *_UpperCAmelCase : List[Any] , **_UpperCAmelCase : str ) -> int:
'''simple docstring'''
requires_backends(cls , ["flax"] )
class lowercase__ ( metaclass=SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = ['''flax''']
def __init__( self : Union[str, Any] , *_UpperCAmelCase : Any , **_UpperCAmelCase : Tuple ) -> str:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowercase__ ( cls : str , *_UpperCAmelCase : Optional[int] , **_UpperCAmelCase : int ) -> Any:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowercase__ ( cls : List[str] , *_UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : Optional[int] ) -> Dict:
'''simple docstring'''
requires_backends(cls , ["flax"] )
class lowercase__ ( metaclass=SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = ['''flax''']
def __init__( self : Union[str, Any] , *_UpperCAmelCase : Dict , **_UpperCAmelCase : Dict ) -> str:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowercase__ ( cls : Optional[int] , *_UpperCAmelCase : int , **_UpperCAmelCase : List[Any] ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowercase__ ( cls : Dict , *_UpperCAmelCase : Dict , **_UpperCAmelCase : Union[str, Any] ) -> int:
'''simple docstring'''
requires_backends(cls , ["flax"] )
class lowercase__ ( metaclass=SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = ['''flax''']
def __init__( self : Any , *_UpperCAmelCase : str , **_UpperCAmelCase : Tuple ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowercase__ ( cls : str , *_UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : List[str] ) -> str:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowercase__ ( cls : int , *_UpperCAmelCase : Any , **_UpperCAmelCase : int ) -> Dict:
'''simple docstring'''
requires_backends(cls , ["flax"] )
class lowercase__ ( metaclass=SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = ['''flax''']
def __init__( self : str , *_UpperCAmelCase : int , **_UpperCAmelCase : Dict ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowercase__ ( cls : Optional[Any] , *_UpperCAmelCase : List[str] , **_UpperCAmelCase : List[Any] ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowercase__ ( cls : List[Any] , *_UpperCAmelCase : int , **_UpperCAmelCase : int ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["flax"] )
class lowercase__ ( metaclass=SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = ['''flax''']
def __init__( self : Dict , *_UpperCAmelCase : Any , **_UpperCAmelCase : Optional[Any] ) -> int:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowercase__ ( cls : List[Any] , *_UpperCAmelCase : str , **_UpperCAmelCase : List[str] ) -> Any:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowercase__ ( cls : List[str] , *_UpperCAmelCase : str , **_UpperCAmelCase : Optional[Any] ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ["flax"] )
class lowercase__ ( metaclass=SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = ['''flax''']
def __init__( self : Union[str, Any] , *_UpperCAmelCase : Optional[int] , **_UpperCAmelCase : Tuple ) -> str:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowercase__ ( cls : str , *_UpperCAmelCase : Union[str, Any] , **_UpperCAmelCase : Union[str, Any] ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowercase__ ( cls : List[Any] , *_UpperCAmelCase : Optional[int] , **_UpperCAmelCase : Optional[Any] ) -> str:
'''simple docstring'''
requires_backends(cls , ["flax"] )
class lowercase__ ( metaclass=SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = ['''flax''']
def __init__( self : Optional[Any] , *_UpperCAmelCase : str , **_UpperCAmelCase : int ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowercase__ ( cls : Union[str, Any] , *_UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : List[Any] ) -> str:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowercase__ ( cls : List[Any] , *_UpperCAmelCase : Dict , **_UpperCAmelCase : str ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["flax"] )
class lowercase__ ( metaclass=SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = ['''flax''']
def __init__( self : List[str] , *_UpperCAmelCase : Optional[int] , **_UpperCAmelCase : Optional[int] ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowercase__ ( cls : Union[str, Any] , *_UpperCAmelCase : Optional[int] , **_UpperCAmelCase : str ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowercase__ ( cls : Optional[int] , *_UpperCAmelCase : Any , **_UpperCAmelCase : List[str] ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["flax"] )
class lowercase__ ( metaclass=SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = ['''flax''']
def __init__( self : Optional[Any] , *_UpperCAmelCase : Union[str, Any] , **_UpperCAmelCase : Optional[Any] ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowercase__ ( cls : Optional[int] , *_UpperCAmelCase : Union[str, Any] , **_UpperCAmelCase : Dict ) -> Any:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowercase__ ( cls : List[str] , *_UpperCAmelCase : List[str] , **_UpperCAmelCase : Optional[Any] ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["flax"] )
class lowercase__ ( metaclass=SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = ['''flax''']
def __init__( self : Tuple , *_UpperCAmelCase : List[str] , **_UpperCAmelCase : Optional[int] ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowercase__ ( cls : Any , *_UpperCAmelCase : Tuple , **_UpperCAmelCase : Optional[int] ) -> Any:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowercase__ ( cls : List[Any] , *_UpperCAmelCase : int , **_UpperCAmelCase : int ) -> int:
'''simple docstring'''
requires_backends(cls , ["flax"] )
class lowercase__ ( metaclass=SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = ['''flax''']
def __init__( self : Tuple , *_UpperCAmelCase : int , **_UpperCAmelCase : Any ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowercase__ ( cls : str , *_UpperCAmelCase : int , **_UpperCAmelCase : str ) -> Any:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowercase__ ( cls : int , *_UpperCAmelCase : Dict , **_UpperCAmelCase : List[Any] ) -> Dict:
'''simple docstring'''
requires_backends(cls , ["flax"] )
class lowercase__ ( metaclass=SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = ['''flax''']
def __init__( self : int , *_UpperCAmelCase : Any , **_UpperCAmelCase : int ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowercase__ ( cls : Optional[int] , *_UpperCAmelCase : Dict , **_UpperCAmelCase : Dict ) -> Dict:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowercase__ ( cls : str , *_UpperCAmelCase : Dict , **_UpperCAmelCase : Any ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["flax"] )
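# --- Added note (not part of the original file). These placeholders keep
# `from diffusers import FlaxDDIMScheduler` (and friends) importable when the
# flax backend is missing; any use raises a helpful error instead:
#
# try:
#     FlaxDDIMScheduler()
# except Exception as err:  # requires_backends raises with install instructions
#     print(err)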
| 241 | 1 |
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
_snake_case = {
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    """Compare the installed version against the wanted one with the requested operator."""
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )


def require_version(requirement, hint=None):
    """Check that a pip-style requirement string (e.g. `numpy>=1.17`) is satisfied by the installed package."""
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case: checking the running Python interpreter itself
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if a version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    """require_version wrapper which emits a core-specific hint on failure."""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
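# Editor's note: a minimal usage sketch for the helpers above (not part of the original
# module; the requirement strings are illustrative and may raise on your machine).
#   require_version("numpy>=1.17")      # versioned check via importlib.metadata
#   require_version("python>=3.7")      # special-cased: checks the running interpreter
#   require_version_core("tokenizers")  # bare existence check, with the core install hint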
| 36 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/trocr-base-handwritten": (
"https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    """Configuration class for the TrOCR decoder."""

    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(self, vocab_size=50265, d_model=1024, decoder_layers=12, decoder_attention_heads=16, decoder_ffn_dim=4096, activation_function="gelu", max_position_embeddings=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, decoder_start_token_id=2, init_std=0.02, decoder_layerdrop=0.0, use_cache=True, scale_embedding=False, use_learned_position_embeddings=True, layernorm_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 227 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
"facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig(PretrainedConfig):
    """Configuration class for XLM-RoBERTa-XL models."""

    model_type = "xlm-roberta-xl"

    def __init__(self, vocab_size=250880, hidden_size=2560, num_hidden_layers=36, num_attention_heads=32, intermediate_size=10240, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=514, type_vocab_size=1, initializer_range=0.02, layer_norm_eps=1e-05, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    """ONNX export configuration for XLM-RoBERTa-XL."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
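# Editor's note: illustrative only. The ONNX config above advertises dynamic axes so
# exported models accept variable batch size and sequence length, e.g.:
#   onnx_config = XLMRobertaXLOnnxConfig(XLMRobertaXLConfig())
#   onnx_config.inputs  # OrderedDict(input_ids/attention_mask -> {0: "batch", 1: "sequence"})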
| 361 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
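# Editor's note: illustrative walk-through of the tiny BPE above. With merges
# "r e" -> re, "a d" -> ad, "a p" -> ap, "ap t</w>" -> apt, "ad apt</w>" -> adapt:
# "react" only admits the "r e" merge, giving "re@@ a@@ c@@ t", while "readapt"
# reduces to "re@@ adapt" - exactly the sequence asserted in test_full_tokenizer.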
| 273 | 0 |
import math
def fx(x: float, a: float) -> float:
    """f(x) = x^2 - a; its positive root is the square root of a."""
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    """f'(x) = 2x."""
    return 2 * x


def get_initial_point(a: float) -> float:
    """Pick a starting point that is guaranteed to lie above the root."""
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start


def square_root_iterative(a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001) -> float:
    """Square root of a non-negative number via Newton-Raphson iteration."""
    if a < 0:
        raise ValueError("math domain error")
    value = get_initial_point(a)
    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value
    return value
if __name__ == "__main__":
from doctest import testmod
testmod()
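# Editor's note: worked example (illustrative). Newton's update for f(x) = x^2 - a is
# x_next = x - (x^2 - a) / (2x); for a = 2 starting at x = 2: 2 - 2/4 = 1.5, then
# ~1.41667, converging to 2**0.5 ~ 1.4142135623730951.
#   square_root_iterative(2)  # -> 1.414213562373095...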
| 65 |
def present_value(discount_rate: float, cash_flows: list) -> float:
    """Net present value of a series of cash flows at the given discount rate."""
    if discount_rate < 0:
        raise ValueError('Discount rate cannot be negative')
    if not cash_flows:
        raise ValueError('Cash flows list cannot be empty')
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)
if __name__ == "__main__":
import doctest
doctest.testmod()
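# Editor's note: worked example (illustrative). At a 10% discount rate,
# PV([-1000, 500, 500, 500]) = -1000 + 500/1.1 + 500/1.1**2 + 500/1.1**3 ~ 243.43:
#   present_value(0.10, [-1000.0, 500.0, 500.0, 500.0])  # -> 243.43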
| 73 | 0 |
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
__UpperCAmelCase = logging.get_logger(__name__)
def normalize_box(box, width, height):
    # scale each pixel coordinate into the 0-1000 grid expected by LayoutLM-style models
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]


def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str] = None):
    """Applies Tesseract OCR on a document image and returns recognized words + normalized bounding boxes."""
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
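# Editor's note: quick check of the normalization above (illustrative). On a 500x1000
# image, the pixel box [50, 100, 200, 300] maps to the 0-1000 grid as
# [1000*50/500, 1000*100/1000, 1000*200/500, 1000*300/1000] = [100, 100, 400, 300]:
#   normalize_box([50, 100, 200, 300], width=500, height=1000)  # -> [100, 100, 400, 300]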
class LayoutLMv2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, apply_ocr: bool = True, ocr_lang: Optional[str] = None, tesseract_config: Optional[str] = "", **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, apply_ocr: bool = None, ocr_lang: Optional[str] = None, tesseract_config: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
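# Editor's note: a minimal usage sketch, assuming Pillow and pytesseract are installed;
# "document.png" is a hypothetical file name.
#   processor = LayoutLMv2ImageProcessor(apply_ocr=True)
#   encoding = processor(images=PIL.Image.open("document.png"), return_tensors="np")
#   encoding contains "pixel_values", plus "words" and "boxes" because apply_ocr=True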
| 357 |
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
@require_torch
def _lowerCamelCase ( self : Union[str, Any] ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
a__: Dict ="\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
a__: Union[str, Any] ="\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
a__: Dict ="\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")\nsocket.socket = offline_socket\n "
# Force fetching the files so that we can use the cache
a__: Tuple ="hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(_a )
BertModel.from_pretrained(_a )
BertTokenizer.from_pretrained(_a )
pipeline(task="fill-mask" , model=_a )
# baseline - just load from_pretrained with normal network
a__: Optional[int] =[sys.executable, "-c", "\n".join([load, run, mock] )]
# should succeed
a__: str =self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
a__: Union[str, Any] ="1"
a__: Any =subprocess.run(_a , env=_a , check=_a , capture_output=_a )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
@require_torch
def _lowerCamelCase ( self : str ):
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
a__: Optional[int] ="\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
a__: Tuple ="\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
a__: List[Any] ="\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n "
# Force fetching the files so that we can use the cache
a__: str ="hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(_a )
BertModel.from_pretrained(_a )
BertTokenizer.from_pretrained(_a )
pipeline(task="fill-mask" , model=_a )
# baseline - just load from_pretrained with normal network
a__: Any =[sys.executable, "-c", "\n".join([load, run, mock] )]
# should succeed
a__: Optional[Any] =self.get_env()
a__: Union[str, Any] =subprocess.run(_a , env=_a , check=_a , capture_output=_a )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
@require_torch
def _lowerCamelCase ( self : List[str] ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
a__: Tuple ="\nfrom transformers import BertConfig, BertModel, BertTokenizer\n "
a__: str ="\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n "
a__: int ="\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
# baseline - just load from_pretrained with normal network
a__: Union[str, Any] =[sys.executable, "-c", "\n".join([load, run] )]
# should succeed
a__: Optional[Any] =self.get_env()
a__: Optional[Any] =subprocess.run(_a , env=_a , check=_a , capture_output=_a )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
# next emulate no network
a__: int =[sys.executable, "-c", "\n".join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
a__: Tuple ="1"
a__: Dict =subprocess.run(_a , env=_a , check=_a , capture_output=_a )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
@require_torch
def _lowerCamelCase ( self : Optional[Any] ):
a__: Dict ="\nfrom transformers import pipeline\n "
a__: Union[str, Any] ="\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n "
a__: List[str] ="\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
a__: Dict =self.get_env()
a__: Optional[Any] ="1"
a__: Dict =[sys.executable, "-c", "\n".join([load, mock, run] )]
a__: Optional[int] =subprocess.run(_a , env=_a , check=_a , capture_output=_a )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
"You cannot infer task automatically within `pipeline` when using offline mode" , result.stderr.decode().replace("\n" , "" ) , )
@require_torch
def _lowerCamelCase ( self : Optional[int] ):
a__: Optional[Any] ="\nfrom transformers import AutoModel\n "
a__: str ="\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n "
# baseline - just load from_pretrained with normal network
a__: Tuple =[sys.executable, "-c", "\n".join([load, run] )]
# should succeed
a__: Any =self.get_env()
a__: int =subprocess.run(_a , env=_a , check=_a , capture_output=_a )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
a__: List[Any] ="1"
a__: int =subprocess.run(_a , env=_a , check=_a , capture_output=_a )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
| 42 | 0 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_generation(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
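# Editor's note: single-rank sketch of the padding semantics the checks above verify
# (illustrative; the real call pads across ranks). A (2, 10) tensor next to a (3, 10)
# one comes back as (3, 10), zero-padded at the end, or at the front when pad_first=True:
#   padded = torch.cat([tensor, torch.zeros(1, 10, device=tensor.device)])  # pad_first=False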
| 92 |
import warnings
from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401
warnings.warn(
"""The `image_to_image.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionImg2ImgPipeline` instead."""
)
| 92 | 1 |
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
Text2TextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator('Something there')
        self.assertEqual(outputs, [{'generated_text': ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]['generated_text'].startswith('Something there'))

        outputs = generator(['This is great !', 'Something else'], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{'generated_text': ANY(str)}, {'generated_text': ANY(str)}],
                [{'generated_text': ANY(str)}, {'generated_text': ANY(str)}],
            ],
        )

        outputs = generator(
            ['This is great !', 'Something else'], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{'generated_text': ANY(str)}, {'generated_text': ANY(str)}],
                [{'generated_text': ANY(str)}, {'generated_text': ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)
@require_torch
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
SCREAMING_SNAKE_CASE = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='pt')
# do_sample=False necessary for reproducibility
SCREAMING_SNAKE_CASE = generator('Something there' , do_sample=a)
self.assertEqual(a , [{'generated_text': ''}])
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = generator(
'Something there' , num_return_sequences=a , num_beams=a , )
SCREAMING_SNAKE_CASE = [
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': ''},
]
self.assertEqual(a , a)
SCREAMING_SNAKE_CASE = generator('This is a test' , do_sample=a , num_return_sequences=2 , return_tensors=a)
self.assertEqual(
a , [
{'generated_token_ids': ANY(torch.Tensor)},
{'generated_token_ids': ANY(torch.Tensor)},
] , )
SCREAMING_SNAKE_CASE = generator.model.config.eos_token_id
SCREAMING_SNAKE_CASE = '<pad>'
SCREAMING_SNAKE_CASE = generator(
['This is a test', 'This is a second test'] , do_sample=a , num_return_sequences=2 , batch_size=2 , return_tensors=a , )
self.assertEqual(
a , [
[
{'generated_token_ids': ANY(torch.Tensor)},
{'generated_token_ids': ANY(torch.Tensor)},
],
[
{'generated_token_ids': ANY(torch.Tensor)},
{'generated_token_ids': ANY(torch.Tensor)},
],
] , )
@require_tf
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
SCREAMING_SNAKE_CASE = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='tf')
# do_sample=False necessary for reproducibility
SCREAMING_SNAKE_CASE = generator('Something there' , do_sample=a)
self.assertEqual(a , [{'generated_text': ''}])
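# Editor's note: the pipeline exercised above can be driven directly (illustrative
# checkpoint; any seq2seq model works):
#   generator = pipeline("text2text-generation", model="t5-small")
#   generator("translate English to German: Hello", do_sample=False)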
| 370 |
from math import isqrt
def calculate_prime_numbers(max_number):
    """Sieve of Eratosthenes: all primes below max_number."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number=10**8):
    """Project Euler 187: count composites below max_number with exactly two prime factors."""
    prime_numbers = calculate_prime_numbers(max_number // 2)
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count
if __name__ == "__main__":
print(f"""{solution() = }""")
| 327 | 0 |
'''simple docstring'''
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class snake_case ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
@slow
def _lowerCamelCase ( self : Dict ):
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
__UpperCamelCase = AutoTokenizer.from_pretrained(__A )
self.assertIsNotNone(__A )
self.assertIsInstance(__A , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(__A ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
__UpperCamelCase = AutoTokenizer.from_pretrained(__A )
self.assertIsNotNone(__A )
self.assertIsInstance(__A , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(__A ) , 0 )
def _lowerCamelCase ( self : Dict ):
__UpperCamelCase = AutoTokenizer.from_pretrained(__A )
self.assertIsInstance(__A , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 1_2 )
def _lowerCamelCase ( self : Union[str, Any] ):
__UpperCamelCase = AutoTokenizer.from_pretrained(__A )
self.assertIsInstance(__A , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 2_0 )
def _lowerCamelCase ( self : Dict ):
__UpperCamelCase = AutoConfig.from_pretrained(__A )
self.assertIsInstance(__A , __A )
# Check that tokenizer_type ≠ model_type
__UpperCamelCase = AutoTokenizer.from_pretrained(__A , config=__A )
self.assertIsInstance(__A , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 1_2 )
def _lowerCamelCase ( self : Optional[Any] ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.txt' , os.path.join(__A , 'vocab.txt' ) )
__UpperCamelCase = AutoTokenizer.from_pretrained(__A , tokenizer_type='bert' , use_fast=__A )
self.assertIsInstance(__A , __A )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.json' , os.path.join(__A , 'vocab.json' ) )
shutil.copy('./tests/fixtures/merges.txt' , os.path.join(__A , 'merges.txt' ) )
__UpperCamelCase = AutoTokenizer.from_pretrained(__A , tokenizer_type='gpt2' , use_fast=__A )
self.assertIsInstance(__A , __A )
@require_tokenizers
def _lowerCamelCase ( self : Optional[int] ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.txt' , os.path.join(__A , 'vocab.txt' ) )
__UpperCamelCase = AutoTokenizer.from_pretrained(__A , tokenizer_type='bert' )
self.assertIsInstance(__A , __A )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.json' , os.path.join(__A , 'vocab.json' ) )
shutil.copy('./tests/fixtures/merges.txt' , os.path.join(__A , 'merges.txt' ) )
__UpperCamelCase = AutoTokenizer.from_pretrained(__A , tokenizer_type='gpt2' )
self.assertIsInstance(__A , __A )
def _lowerCamelCase ( self : int ):
with pytest.raises(__A ):
AutoTokenizer.from_pretrained('./' , tokenizer_type='xxx' )
@require_tokenizers
def _lowerCamelCase ( self : Optional[Any] ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
__UpperCamelCase = tokenizer_class.from_pretrained('wietsedv/bert-base-dutch-cased' )
self.assertIsInstance(__A , (BertTokenizer, BertTokenizerFast) )
if isinstance(__A , __A ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , __A )
else:
self.assertEqual(tokenizer.do_lower_case , __A )
self.assertEqual(tokenizer.model_max_length , 5_1_2 )
@require_tokenizers
def _lowerCamelCase ( self : List[str] ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
__A , 'julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier' , ):
__UpperCamelCase = tokenizer_class.from_pretrained('julien-c/herlolip-not-exists' )
def _lowerCamelCase ( self : List[Any] ):
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
__UpperCamelCase = TOKENIZER_MAPPING.values()
__UpperCamelCase = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(__A )
@require_tokenizers
def _lowerCamelCase ( self : Optional[int] ):
self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased' , use_fast=__A ) , __A )
self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased' ) , __A )
@require_tokenizers
def _lowerCamelCase ( self : Optional[int] ):
__UpperCamelCase = AutoTokenizer.from_pretrained('distilbert-base-uncased' , do_lower_case=__A )
__UpperCamelCase = 'Hello, world. How are you?'
__UpperCamelCase = tokenizer.tokenize(__A )
self.assertEqual('[UNK]' , tokens[0] )
__UpperCamelCase = AutoTokenizer.from_pretrained('microsoft/mpnet-base' , do_lower_case=__A )
__UpperCamelCase = tokenizer.tokenize(__A )
self.assertEqual('[UNK]' , tokens[0] )
@require_tokenizers
def _lowerCamelCase ( self : List[Any] ):
__UpperCamelCase = AutoTokenizer.from_pretrained('robot-test/dummy-tokenizer-fast-with-model-config' )
self.assertEqual(type(__A ) , __A )
self.assertEqual(tokenizer.model_max_length , 5_1_2 )
self.assertEqual(tokenizer.vocab_size , 3_0_0_0_0 )
self.assertEqual(tokenizer.unk_token , '[UNK]' )
self.assertEqual(tokenizer.padding_side , 'right' )
self.assertEqual(tokenizer.truncation_side , 'right' )
def _lowerCamelCase ( self : int ):
__UpperCamelCase = AutoTokenizer.from_pretrained(__A )
self.assertIsInstance(__A , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__A )
__UpperCamelCase = AutoTokenizer.from_pretrained(__A )
self.assertIsInstance(__A , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 1_2 )
def _lowerCamelCase ( self : int ):
__UpperCamelCase = AutoTokenizer.from_pretrained('ctrl' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(__A , __A )
def _lowerCamelCase ( self : Tuple ):
# Check we can load the tokenizer config of an online model.
__UpperCamelCase = get_tokenizer_config('bert-base-cased' )
__UpperCamelCase = config.pop('_commit_hash' , __A )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(__A , {'do_lower_case': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
__UpperCamelCase = get_tokenizer_config(__A )
self.assertDictEqual(__A , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
__UpperCamelCase = AutoTokenizer.from_pretrained(__A )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__A )
__UpperCamelCase = get_tokenizer_config(__A )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['tokenizer_class'] , 'BertTokenizer' )
def _lowerCamelCase ( self : List[str] ):
try:
AutoConfig.register('custom' , __A )
AutoTokenizer.register(__A , slow_tokenizer_class=__A )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__A ):
AutoTokenizer.register(__A , slow_tokenizer_class=__A )
__UpperCamelCase = CustomTokenizer.from_pretrained(__A )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__A )
__UpperCamelCase = AutoTokenizer.from_pretrained(__A )
self.assertIsInstance(__A , __A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def _lowerCamelCase ( self : Optional[int] ):
try:
AutoConfig.register('custom' , __A )
# Can register in two steps
AutoTokenizer.register(__A , slow_tokenizer_class=__A )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(__A , fast_tokenizer_class=__A )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
__A , slow_tokenizer_class=__A , fast_tokenizer_class=__A )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__A ):
AutoTokenizer.register(__A , fast_tokenizer_class=__A )
# We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCamelCase = BertTokenizerFast.from_pretrained(__A )
bert_tokenizer.save_pretrained(__A )
__UpperCamelCase = CustomTokenizerFast.from_pretrained(__A )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__A )
__UpperCamelCase = AutoTokenizer.from_pretrained(__A )
self.assertIsInstance(__A , __A )
__UpperCamelCase = AutoTokenizer.from_pretrained(__A , use_fast=__A )
self.assertIsInstance(__A , __A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _lowerCamelCase ( self : List[Any] ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__A ):
__UpperCamelCase = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__A ):
__UpperCamelCase = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=__A )
__UpperCamelCase = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=__A )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__A )
__UpperCamelCase = AutoTokenizer.from_pretrained(__A , trust_remote_code=__A )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizerFast' )
# Test we can also load the slow version
__UpperCamelCase = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=__A , use_fast=__A )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__A )
__UpperCamelCase = AutoTokenizer.from_pretrained(__A , trust_remote_code=__A , use_fast=__A )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizer' )
@require_tokenizers
def _lowerCamelCase ( self : Any ):
        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewTokenizerFast(BertTokenizerFast):
            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False
try:
AutoConfig.register('custom' , __A )
AutoTokenizer.register(__A , slow_tokenizer_class=__A )
AutoTokenizer.register(__A , fast_tokenizer_class=__A )
# If remote code is not set, the default is to use local
__UpperCamelCase = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
self.assertFalse(tokenizer.special_attribute_present )
__UpperCamelCase = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' , use_fast=__A )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
__UpperCamelCase = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=__A )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
self.assertFalse(tokenizer.special_attribute_present )
__UpperCamelCase = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=__A , use_fast=__A )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
__UpperCamelCase = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=__A )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
self.assertTrue(tokenizer.special_attribute_present )
__UpperCamelCase = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=__A , use_fast=__A )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _lowerCamelCase ( self : Dict ):
__UpperCamelCase = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer_legacy' , trust_remote_code=__A )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
# Test we can also load the slow version
__UpperCamelCase = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer_legacy' , trust_remote_code=__A , use_fast=__A )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
else:
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
def _lowerCamelCase ( self : int ):
with self.assertRaisesRegex(
__A , 'bert-base is not a local folder and is not a valid model identifier' ):
__UpperCamelCase = AutoTokenizer.from_pretrained('bert-base' )
def _lowerCamelCase ( self : str ):
with self.assertRaisesRegex(
__A , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
__UpperCamelCase = AutoTokenizer.from_pretrained(__A , revision='aaaaaa' )
def _lowerCamelCase ( self : Dict ):
# Make sure we have cached the tokenizer.
__UpperCamelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
with RequestCounter() as counter:
__UpperCamelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 53 |
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(formatter_cls: type, format_type: Optional[str], aliases: Optional[List[str]] = None):
    """Register a Formatter class under a main format_type name plus optional aliases."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None):
    """Register an error to raise when the format_type of an uninstalled backend is requested."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['''python'''])
_register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow'''])
_register_formatter(NumpyFormatter, '''numpy''', aliases=['''np'''])
_register_formatter(PandasFormatter, '''pandas''', aliases=['''pd'''])
_register_formatter(CustomFormatter, '''custom''')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch'''])
else:
    _torch_error = ValueError('''PyTorch needs to be installed to be able to return PyTorch tensors.''')
_register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch'''])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf'''])
else:
    _tf_error = ValueError('''Tensorflow needs to be installed to be able to return Tensorflow tensors.''')
_register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf'''])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, '''jax''', aliases=[])
else:
    _jax_error = ValueError('''JAX needs to be installed to be able to return JAX arrays.''')
_register_unavailable_formatter(_jax_error, '''jax''', aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """If format_type is a known alias, return the main type name; otherwise return it unchanged."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Factory that instantiates the Formatter registered under format_type (or raises)."""
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
        )
| 214 | 0 |
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
lowerCAmelCase__ = logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder):
    """BertEncoder variant that can run a single layer at a time for patience-based early exit."""

    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])
        hidden_states = layer_outputs[0]
        return hidden_states
@add_start_docstrings(
    'The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.',
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    """BertModel with Patience-based Early Exit (PABEE) bookkeeping."""

    def __init__(self, config):
        super().__init__(config)
        self.encoder = BertEncoderWithPabee(config)
        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0
        self.regression_threshold = 0
    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)
@add_start_docstrings_to_model_forward(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : int=None ,lowercase__ : int=None ,lowercase__ : Optional[Any]=None ,lowercase__ : Dict=None ,lowercase__ : Optional[int]=None ,lowercase__ : List[str]=None ,lowercase__ : Union[str, Any]=None ,lowercase__ : Tuple=None ,lowercase__ : List[Any]=None ,lowercase__ : Union[str, Any]=None ,lowercase__ : Optional[int]=False ,):
if input_ids is not None and inputs_embeds is not None:
raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''' )
elif input_ids is not None:
__lowercase = input_ids.size()
elif inputs_embeds is not None:
__lowercase = inputs_embeds.size()[:-1]
else:
raise ValueError('''You have to specify either input_ids or inputs_embeds''' )
__lowercase = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__lowercase = torch.ones(lowercase__ ,device=lowercase__ )
if token_type_ids is None:
__lowercase = torch.zeros(lowercase__ ,dtype=torch.long ,device=lowercase__ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__lowercase = self.get_extended_attention_mask(lowercase__ ,lowercase__ ,lowercase__ )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
__lowercase , __lowercase , __lowercase = encoder_hidden_states.size()
__lowercase = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
__lowercase = torch.ones(lowercase__ ,device=lowercase__ )
__lowercase = self.invert_attention_mask(lowercase__ )
else:
__lowercase = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__lowercase = self.get_head_mask(lowercase__ ,self.config.num_hidden_layers )
__lowercase = self.embeddings(
input_ids=lowercase__ ,position_ids=lowercase__ ,token_type_ids=lowercase__ ,inputs_embeds=lowercase__ )
__lowercase = embedding_output
if self.training:
__lowercase = []
for i in range(self.config.num_hidden_layers ):
__lowercase = self.encoder.adaptive_forward(
lowercase__ ,current_layer=lowercase__ ,attention_mask=lowercase__ ,head_mask=lowercase__ )
__lowercase = self.pooler(lowercase__ )
__lowercase = output_layers[i](output_dropout(lowercase__ ) )
res.append(lowercase__ )
elif self.patience == 0: # Use all layers for inference
__lowercase = self.encoder(
lowercase__ ,attention_mask=lowercase__ ,head_mask=lowercase__ ,encoder_hidden_states=lowercase__ ,encoder_attention_mask=lowercase__ ,)
__lowercase = self.pooler(encoder_outputs[0] )
__lowercase = [output_layers[self.config.num_hidden_layers - 1](lowercase__ )]
else:
__lowercase = 0
__lowercase = None
__lowercase = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
__lowercase = self.encoder.adaptive_forward(
lowercase__ ,current_layer=lowercase__ ,attention_mask=lowercase__ ,head_mask=lowercase__ )
__lowercase = self.pooler(lowercase__ )
__lowercase = output_layers[i](lowercase__ )
if regression:
__lowercase = logits.detach()
if patient_result is not None:
__lowercase = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
__lowercase = 0
else:
__lowercase = logits.detach().argmax(dim=1 )
if patient_result is not None:
__lowercase = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(lowercase__ ) ):
patient_counter += 1
else:
__lowercase = 0
__lowercase = logits
if patient_counter == self.patience:
break
__lowercase = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
    'Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. ',
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    """Bert classifier whose per-layer heads enable patience-based early exit."""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )
        self.init_weights()
@add_start_docstrings_to_model_forward(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : str=None ,lowercase__ : Any=None ,lowercase__ : str=None ,lowercase__ : Optional[Any]=None ,lowercase__ : Optional[Any]=None ,lowercase__ : Optional[int]=None ,lowercase__ : int=None ,):
__lowercase = self.bert(
input_ids=lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,position_ids=lowercase__ ,head_mask=lowercase__ ,inputs_embeds=lowercase__ ,output_dropout=self.dropout ,output_layers=self.classifiers ,regression=self.num_labels == 1 ,)
__lowercase = (logits[-1],)
if labels is not None:
__lowercase = None
__lowercase = 0
for ix, logits_item in enumerate(lowercase__ ):
if self.num_labels == 1:
# We are doing regression
__lowercase = MSELoss()
__lowercase = loss_fct(logits_item.view(-1 ) ,labels.view(-1 ) )
else:
__lowercase = CrossEntropyLoss()
__lowercase = loss_fct(logits_item.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
if total_loss is None:
__lowercase = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
__lowercase = (total_loss / total_weights,) + outputs
return outputs
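# Editor's note: sketch of how the patience mechanism above is driven (illustrative
# checkpoint name; method names follow the model classes in this file):
#   model = BertForSequenceClassificationWithPabee.from_pretrained("bert-base-uncased")
#   model.bert.set_patience(3)   # exit once 3 consecutive layer heads agree
#   model.bert.reset_stats(); outputs = model(input_ids); model.bert.log_stats()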
| 52 |
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", type_sequence_label_size=10, initializer_range=0.02, out_features=["stage2", "stage3", "stage4"], num_labels=3, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__lowercase = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self : str ):
return ConvNextConfig(
num_channels=self.num_channels ,num_stages=self.num_stages ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,is_training=self.is_training ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,out_features=self.out_features ,)
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
return UperNetConfig(
backbone_config=self.get_backbone_config() ,hidden_size=5_1_2 ,pool_scales=[1, 2, 3, 6] ,use_auxiliary_head=lowercase__ ,auxiliary_loss_weight=0.4 ,auxiliary_in_channels=4_0 ,auxiliary_channels=2_5_6 ,auxiliary_num_convs=1 ,auxiliary_concat_input=lowercase__ ,loss_ignore_index=2_5_5 ,num_labels=self.num_labels ,)
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : List[str] ,lowercase__ : Any ,lowercase__ : Any ):
__lowercase = UperNetForSemanticSegmentation(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_labels, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = self.prepare_config_and_inputs()
((__lowercase) , (__lowercase) , (__lowercase)) = config_and_inputs
__lowercase = {'''pixel_values''': pixel_values}
return config, inputs_dict
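# Hedged usage sketch (mirrors the tester above; values illustrative): a UperNet
# model is built from a backbone config plus decode-head options, e.g.
#
#     backbone = ConvNextConfig(num_stages=4 ,out_features=["stage2", "stage3", "stage4"] )
#     config = UperNetConfig(backbone_config=backbone ,hidden_size=512 ,num_labels=150 )
#     model = UperNetForSemanticSegmentation(config )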
@require_torch
class lowercase_ (lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE : Tuple = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {}
SCREAMING_SNAKE_CASE : Tuple = False
SCREAMING_SNAKE_CASE : Dict = False
SCREAMING_SNAKE_CASE : Any = False
SCREAMING_SNAKE_CASE : Optional[Any] = False
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : List[Any] = False
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = UperNetModelTester(self )
__lowercase = ConfigTester(self ,config_class=lowercase__ ,has_text_modality=lowercase__ ,hidden_size=3_7 )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
return
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowercase__ )
__lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowercase__ )
@unittest.skip(reason='''UperNet does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE ( self : int ):
pass
@unittest.skip(reason='''UperNet does not support input and output embeddings''' )
def SCREAMING_SNAKE_CASE ( self : str ):
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
pass
@require_torch_multi_gpu
@unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def SCREAMING_SNAKE_CASE ( self : Any ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
pass
def SCREAMING_SNAKE_CASE ( self : Any ):
def check_hidden_states_output(lowercase__ : List[str] ,lowercase__ : int ,lowercase__ : List[str] ):
__lowercase = model_class(lowercase__ )
model.to(lowercase__ )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(lowercase__ ,lowercase__ ) )
__lowercase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowercase = self.model_tester.num_stages
self.assertEqual(len(lowercase__ ) ,expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = True
check_hidden_states_output(lowercase__ ,lowercase__ ,lowercase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
check_hidden_states_output(lowercase__ ,lowercase__ ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = _config_zero_init(lowercase__ )
__lowercase = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
__lowercase = model_class(config=lowercase__ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=F"Parameter {name} of model {model_class} seems not properly initialized" ,)
@unittest.skip(reason='''UperNet does not have tied weights''' )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
pass
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = UperNetForSemanticSegmentation.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
def _A ( ):
"""simple docstring"""
__lowercase = hf_hub_download(
repo_id='''hf-internal-testing/fixtures_ade20k''' , repo_type='''dataset''' , filename='''ADE_val_00000001.jpg''' )
__lowercase = Image.open(A__ ).convert('''RGB''' )
return image
@require_torch
@require_vision
@slow
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''' )
__lowercase = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''' ).to(lowercase__ )
__lowercase = prepare_img()
__lowercase = processor(images=lowercase__ ,return_tensors='''pt''' ).to(lowercase__ )
with torch.no_grad():
__lowercase = model(**lowercase__ )
__lowercase = torch.Size((1, model.config.num_labels, 5_1_2, 5_1_2) )
self.assertEqual(outputs.logits.shape ,lowercase__ )
__lowercase = torch.tensor(
[[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]] ).to(lowercase__ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] ,lowercase__ ,atol=1e-4 ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
__lowercase = AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''' )
__lowercase = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''' ).to(lowercase__ )
__lowercase = prepare_img()
__lowercase = processor(images=lowercase__ ,return_tensors='''pt''' ).to(lowercase__ )
with torch.no_grad():
__lowercase = model(**lowercase__ )
__lowercase = torch.Size((1, model.config.num_labels, 5_1_2, 5_1_2) )
self.assertEqual(outputs.logits.shape ,lowercase__ )
__lowercase = torch.tensor(
[[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]] ).to(lowercase__ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] ,lowercase__ ,atol=1e-4 ) )
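# Hedged post-processing sketch (not part of the test): a segmentation map is the
# argmax of the logits over the class dimension, e.g.
#
#     seg_map = outputs.logits.argmax(dim=1 )[0]  # (512, 512) tensor of label ids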
| 52 | 1 |
'''simple docstring'''
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
UpperCamelCase__ : Dict = 4
UpperCamelCase__ : str = 3
class _lowerCAmelCase ( __A ):
"""simple docstring"""
pass
def UpperCAmelCase ( a_ ) -> Optional[int]:
"""simple docstring"""
for shard in shards:
for i in range(a_ ):
yield {"i": i, "shard": shard}
def UpperCAmelCase ( ) -> Dict:
"""simple docstring"""
A_ : Optional[int] = int(os.environ["""RANK"""] )
A_ : List[Any] = int(os.environ["""WORLD_SIZE"""] )
A_ : Tuple = ArgumentParser()
parser.add_argument("""--streaming""" , type=a_ )
parser.add_argument("""--local_rank""" , type=a_ )
parser.add_argument("""--num_workers""" , type=a_ , default=0 )
A_ : Optional[Any] = parser.parse_args()
A_ : List[Any] = args.streaming
A_ : Dict = args.num_workers
A_ : int = {"""shards""": [F"shard_{shard_idx}" for shard_idx in range(a_ )]}
A_ : Dict = IterableDataset.from_generator(a_ , gen_kwargs=a_ )
if not streaming:
A_ : Optional[int] = Dataset.from_list(list(a_ ) )
A_ : Tuple = split_dataset_by_node(a_ , rank=a_ , world_size=a_ )
A_ : List[Any] = torch.utils.data.DataLoader(a_ , num_workers=a_ )
A_ : Optional[int] = NUM_SHARDS * NUM_ITEMS_PER_SHARD
A_ : Dict = full_size // world_size
expected_local_size += int(rank < (full_size % world_size) )
A_ : int = sum(1 for _ in dataloader )
if local_size != expected_local_size:
raise FailedTestError(F"local_size {local_size} != expected_local_size {expected_local_size}" )
if __name__ == "__main__":
main()
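# Worked check of the expected-size formula above, using this script's constants:
# 4 shards * 3 items per shard = 12 examples in total. With world_size = 5,
# full_size // world_size = 2 with remainder 2, so ranks 0 and 1 each see
# 3 examples and ranks 2-4 each see 2 (3 + 3 + 2 + 2 + 2 = 12).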
| 344 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ : int = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
UpperCamelCase__ : Any = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', f'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', f'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qpos_proj.weight', f'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kpos_proj.weight', f'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.weight', f'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', f'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', f'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kpos_proj.weight', f'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.weight', f'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', f'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', f'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', f'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_qpos_proj.bias', f'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_kpos_proj.bias', f'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.bias', f'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', f'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', f'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_kpos_proj.bias', f'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.bias', f'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', f'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def UpperCAmelCase ( a_ , a_ , a_ ) -> Optional[Any]:
"""simple docstring"""
A_ : int = state_dict.pop(a_ )
A_ : Tuple = val
def UpperCAmelCase ( a_ ) -> Dict:
"""simple docstring"""
A_ : Union[str, Any] = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
A_ : Optional[int] = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" )
A_ : str = value
else:
A_ : int = value
return new_state_dict
def UpperCAmelCase ( a_ , a_=False ) -> Optional[int]:
"""simple docstring"""
A_ : List[Any] = """"""
if is_panoptic:
A_ : Any = """conditional_detr."""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
A_ : Optional[int] = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" )
A_ : str = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
A_ : Optional[Any] = in_proj_weight[:2_5_6, :]
A_ : Tuple = in_proj_bias[:2_5_6]
A_ : Dict = in_proj_weight[2_5_6:5_1_2, :]
A_ : int = in_proj_bias[2_5_6:5_1_2]
A_ : int = in_proj_weight[-2_5_6:, :]
A_ : Optional[int] = in_proj_bias[-2_5_6:]
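# Note on the slicing above: PyTorch's MultiheadAttention stores the query, key and
# value projections stacked in one in_proj matrix of shape (3 * hidden_size, hidden_size).
# With hidden_size = 256, rows [:256] are the query projection, rows [256:512] the key
# projection and rows [-256:] the value projection, which is how they are unpacked
# into separate q/k/v entries of the HuggingFace state dict.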
def UpperCAmelCase ( ) -> Dict:
"""simple docstring"""
A_ : Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A_ : List[Any] = Image.open(requests.get(a_ , stream=a_ ).raw )
return im
@torch.no_grad()
def UpperCAmelCase ( a_ , a_ ) -> Dict:
"""simple docstring"""
A_ : int = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
A_ : str = """resnet101"""
if "dc5" in model_name:
A_ : List[Any] = True
A_ : str = """panoptic""" in model_name
if is_panoptic:
A_ : Dict = 2_5_0
else:
A_ : Union[str, Any] = 9_1
A_ : str = """huggingface/label-files"""
A_ : Union[str, Any] = """coco-detection-id2label.json"""
A_ : Optional[Any] = json.load(open(hf_hub_download(a_ , a_ , repo_type="""dataset""" ) , """r""" ) )
A_ : str = {int(a_ ): v for k, v in idalabel.items()}
A_ : Optional[int] = idalabel
A_ : Tuple = {v: k for k, v in idalabel.items()}
# load image processor
A_ : List[Any] = """coco_panoptic""" if is_panoptic else """coco_detection"""
A_ : Any = ConditionalDetrImageProcessor(format=a_ )
# prepare image
A_ : Tuple = prepare_img()
A_ : Any = image_processor(images=a_ , return_tensors="""pt""" )
A_ : Optional[int] = encoding["""pixel_values"""]
logger.info(F"Converting model {model_name}..." )
# load original model from torch hub
A_ : int = torch.hub.load("""DeppMeng/ConditionalDETR""" , a_ , pretrained=a_ ).eval()
A_ : List[Any] = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
A_ : Union[str, Any] = """conditional_detr.""" + src
rename_key(a_ , a_ , a_ )
A_ : Any = rename_backbone_keys(a_ )
# query, key and value matrices need special treatment
read_in_q_k_v(a_ , is_panoptic=a_ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
A_ : List[str] = """conditional_detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""conditional_detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
A_ : Dict = state_dict.pop(a_ )
A_ : List[Any] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
A_ : str = state_dict.pop(a_ )
A_ : Any = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
A_ : Optional[int] = state_dict.pop(a_ )
A_ : str = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
A_ : Tuple = state_dict.pop(a_ )
A_ : Dict = val
# finally, create HuggingFace model and load state dict
A_ : Union[str, Any] = ConditionalDetrForSegmentation(a_ ) if is_panoptic else ConditionalDetrForObjectDetection(a_ )
model.load_state_dict(a_ )
model.eval()
model.push_to_hub(repo_id=a_ , organization="""DepuMeng""" , commit_message="""Add model""" )
# verify our conversion
A_ : str = conditional_detr(a_ )
A_ : str = model(a_ )
assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1E-4 )
# Save model and image processor
logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
Path(a_ ).mkdir(exist_ok=a_ )
model.save_pretrained(a_ )
image_processor.save_pretrained(a_ )
if __name__ == "__main__":
UpperCamelCase__ : int = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='conditional_detr_resnet50',
type=str,
help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
UpperCamelCase__ : Optional[Any] = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
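# Hedged usage sketch (script and output names illustrative, not from this file):
#     python convert_conditional_detr_checkpoint.py \
#         --model_name conditional_detr_resnet50 \
#         --pytorch_dump_folder_path ./conditional_detr_resnet50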
| 344 | 1 |
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def lowerCAmelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> tuple[int | None, int | None, float]:
"""simple docstring"""
if not arr:
return None, None, 0
if low == high:
return low, high, arr[low]
UpperCamelCase_ = (low + high) // 2
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = max_subarray(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = max_subarray(SCREAMING_SNAKE_CASE_ , mid + 1 , SCREAMING_SNAKE_CASE_ )
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = max_cross_sum(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if left_sum >= right_sum and left_sum >= cross_sum:
return left_low, left_high, left_sum
elif right_sum >= left_sum and right_sum >= cross_sum:
return right_low, right_high, right_sum
return cross_left, cross_right, cross_sum
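# Complexity note: max_subarray halves the range and max_cross_sum does O(n) work,
# giving the recurrence T(n) = 2*T(n/2) + O(n), i.e. O(n log n) overall.
# Worked example: for arr = [-2, 1, -3, 4, -1, 2, 1, -5, 4] the maximum subarray
# is arr[3:7] = [4, -1, 2, 1] with sum 6, so max_subarray(arr, 0, 8) returns (3, 6, 6).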
def lowerCAmelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> tuple[int, int, float]:
"""simple docstring"""
UpperCamelCase_ , UpperCamelCase_ = float("-inf" ), -1
UpperCamelCase_ , UpperCamelCase_ = float("-inf" ), -1
UpperCamelCase_ = 0
for i in range(SCREAMING_SNAKE_CASE_ , low - 1 , -1 ):
summ += arr[i]
if summ > left_sum:
UpperCamelCase_ = summ
UpperCamelCase_ = i
UpperCamelCase_ = 0
for i in range(mid + 1 , high + 1 ):
summ += arr[i]
if summ > right_sum:
UpperCamelCase_ = summ
UpperCamelCase_ = i
return max_left, max_right, (left_sum + right_sum)
def lowerCAmelCase( SCREAMING_SNAKE_CASE_ )-> float:
"""simple docstring"""
UpperCamelCase_ = [randint(1 , SCREAMING_SNAKE_CASE_ ) for _ in range(SCREAMING_SNAKE_CASE_ )]
UpperCamelCase_ = time.time()
max_subarray(SCREAMING_SNAKE_CASE_ , 0 , input_size - 1 )
UpperCamelCase_ = time.time()
return end - start
def lowerCAmelCase( )-> None:
"""simple docstring"""
UpperCamelCase_ = [1_0, 1_0_0, 1_0_0_0, 1_0_0_0_0, 5_0_0_0_0, 1_0_0_0_0_0, 2_0_0_0_0_0, 3_0_0_0_0_0, 4_0_0_0_0_0, 5_0_0_0_0_0]
UpperCamelCase_ = [time_max_subarray(SCREAMING_SNAKE_CASE_ ) for input_size in input_sizes]
print("No of Inputs\t\tTime Taken" )
for input_size, runtime in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
print(SCREAMING_SNAKE_CASE_ , "\t\t" , SCREAMING_SNAKE_CASE_ )
plt.plot(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
plt.xlabel("Number of Inputs" )
plt.ylabel("Time taken in seconds" )
plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
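# Hedged alternative (not part of the original file): Kadane's algorithm finds the
# same maximum sum in O(n) with a single pass, trading away the index-reporting
# divide-and-conquer structure benchmarked above:
#
#     def kadane(arr: Sequence[float]) -> float:
#         best = cur = arr[0]
#         for x in arr[1:]:
#             cur = max(x, cur + x)
#             best = max(best, cur)
#         return best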
| 60 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
SCREAMING_SNAKE_CASE :Tuple = 16
SCREAMING_SNAKE_CASE :Optional[Any] = 32
def lowerCAmelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 1_6 , SCREAMING_SNAKE_CASE_ = "bert-base-cased" )-> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase_ = load_dataset("glue" , "mrpc" )
def tokenize_function(SCREAMING_SNAKE_CASE_ ):
# max_length=None => use the model max length (it's actually the default)
UpperCamelCase_ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
UpperCamelCase_ = datasets.map(
SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=SCREAMING_SNAKE_CASE_ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCamelCase_ = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(SCREAMING_SNAKE_CASE_ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(SCREAMING_SNAKE_CASE_ , padding="max_length" , max_length=1_2_8 , return_tensors="pt" )
return tokenizer.pad(SCREAMING_SNAKE_CASE_ , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
UpperCamelCase_ = DataLoader(
tokenized_datasets["train"] , shuffle=SCREAMING_SNAKE_CASE_ , collate_fn=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ )
UpperCamelCase_ = DataLoader(
tokenized_datasets["validation"] , shuffle=SCREAMING_SNAKE_CASE_ , collate_fn=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ )
return train_dataloader, eval_dataloader
def lowerCAmelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Any:
"""simple docstring"""
model.eval()
UpperCamelCase_ = 0
for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCamelCase_ = model(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase_ = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
UpperCamelCase_ , UpperCamelCase_ = accelerator.gather(
(predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(SCREAMING_SNAKE_CASE_ ) - 1:
UpperCamelCase_ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
UpperCamelCase_ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=SCREAMING_SNAKE_CASE_ , references=SCREAMING_SNAKE_CASE_ , )
UpperCamelCase_ = metric.compute()
return eval_metric["accuracy"]
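# Note on the trimming above: in distributed evaluation, `accelerator.gather` pads
# the final batch so every process contributes equally, which duplicates a few
# examples. Truncating the last gathered batch to
# len(eval_dataloader.dataset) - samples_seen drops exactly those duplicates, so
# the metric counts each validation example once.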
def lowerCAmelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCamelCase_ = config["lr"]
UpperCamelCase_ = int(config["num_epochs"] )
UpperCamelCase_ = int(config["seed"] )
UpperCamelCase_ = int(config["batch_size"] )
UpperCamelCase_ = args.model_name_or_path
set_seed(SCREAMING_SNAKE_CASE_ )
UpperCamelCase_ , UpperCamelCase_ = get_dataloaders(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCamelCase_ = AutoModelForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ )
# Instantiate optimizer
UpperCamelCase_ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
UpperCamelCase_ = optimizer_cls(params=model.parameters() , lr=SCREAMING_SNAKE_CASE_ )
if accelerator.state.deepspeed_plugin is not None:
UpperCamelCase_ = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
UpperCamelCase_ = 1
UpperCamelCase_ = (len(SCREAMING_SNAKE_CASE_ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
UpperCamelCase_ = get_linear_schedule_with_warmup(
optimizer=SCREAMING_SNAKE_CASE_ , num_warmup_steps=0 , num_training_steps=SCREAMING_SNAKE_CASE_ , )
else:
UpperCamelCase_ = DummyScheduler(SCREAMING_SNAKE_CASE_ , total_num_steps=SCREAMING_SNAKE_CASE_ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# We need to keep track of how many total steps we have iterated over
UpperCamelCase_ = 0
# We also need to keep track of the starting epoch so files are named properly
UpperCamelCase_ = 0
UpperCamelCase_ = evaluate.load("glue" , "mrpc" )
UpperCamelCase_ = num_epochs
if args.partial_train_epoch is not None:
UpperCamelCase_ = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
UpperCamelCase_ = args.resume_from_checkpoint.split("epoch_" )[1]
UpperCamelCase_ = ""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
UpperCamelCase_ = int(SCREAMING_SNAKE_CASE_ ) + 1
UpperCamelCase_ = evaluation_loop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
accelerator.print("resumed checkpoint performance:" , SCREAMING_SNAKE_CASE_ )
accelerator.print("resumed checkpoint's scheduler's lr:" , lr_scheduler.get_lr()[0] )
accelerator.print("resumed optimizers's lr:" , optimizer.param_groups[0]["lr"] )
with open(os.path.join(args.output_dir , f"state_{starting_epoch-1}.json" ) , "r" ) as f:
UpperCamelCase_ = json.load(SCREAMING_SNAKE_CASE_ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
UpperCamelCase_ = {}
for epoch in range(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
model.train()
for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase_ = model(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase_ = outputs.loss
UpperCamelCase_ = loss / gradient_accumulation_steps
accelerator.backward(SCREAMING_SNAKE_CASE_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
UpperCamelCase_ = f"epoch_{epoch}"
UpperCamelCase_ = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE_ )
accelerator.save_state(SCREAMING_SNAKE_CASE_ )
UpperCamelCase_ = evaluation_loop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase_ = accuracy
UpperCamelCase_ = lr_scheduler.get_lr()[0]
UpperCamelCase_ = optimizer.param_groups[0]["lr"]
UpperCamelCase_ = epoch
UpperCamelCase_ = overall_step
accelerator.print(f"epoch {epoch}:" , SCREAMING_SNAKE_CASE_ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f"state_{epoch}.json" ) , "w" ) as f:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase( )-> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = argparse.ArgumentParser(description="Simple example of a training script with checkpointing and resume verification." )
parser.add_argument(
"--model_name_or_path" , type=SCREAMING_SNAKE_CASE_ , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=SCREAMING_SNAKE_CASE_ , )
parser.add_argument(
"--output_dir" , type=SCREAMING_SNAKE_CASE_ , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--resume_from_checkpoint" , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , help="If the training should continue from a checkpoint folder." , )
parser.add_argument(
"--partial_train_epoch" , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , help="If passed, the training will stop after this number of epochs." , )
parser.add_argument(
"--num_epochs" , type=SCREAMING_SNAKE_CASE_ , default=2 , help="Number of train epochs." , )
UpperCamelCase_ = parser.parse_args()
UpperCamelCase_ = {"lr": 2E-5, "num_epochs": args.num_epochs, "seed": 4_2, "batch_size": 1_6}
training_function(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
main()
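# Hedged usage sketch (paths illustrative): train, then resume from a saved state:
#     accelerate launch this_script.py --num_epochs 2 --output_dir ./ckpts
#     accelerate launch this_script.py --output_dir ./ckpts --resume_from_checkpoint ./ckpts/epoch_0
# On resume, the script re-runs evaluation and asserts that accuracy, scheduler lr,
# optimizer lr and epoch all match what was written to state_0.json.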
| 60 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
_lowerCamelCase =logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt")
_lowerCamelCase =list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
_lowerCamelCase =tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
with open(lowerCAmelCase_, 'rb' ) as f:
SCREAMING_SNAKE_CASE =Image.open(lowerCAmelCase_ )
return im.convert('RGB' )
@dataclass
class a_ :
"""simple docstring"""
__UpperCAmelCase = field(
default=lowerCamelCase_ , metadata={
'help': 'Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'
} , )
__UpperCAmelCase = field(
default=lowerCamelCase_ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
__UpperCAmelCase = field(default=lowerCamelCase_ , metadata={'help': 'A folder containing the training data.'} )
__UpperCAmelCase = field(default=lowerCamelCase_ , metadata={'help': 'A folder containing the validation data.'} )
__UpperCAmelCase = field(
default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
__UpperCAmelCase = field(
default=lowerCamelCase_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
__UpperCAmelCase = field(
default=lowerCamelCase_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def _lowerCAmelCase ( self : Dict ):
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
'You must specify either a dataset name from the hub or a train and/or validation directory.' )
@dataclass
class a_ :
"""simple docstring"""
__UpperCAmelCase = field(
default='google/vit-base-patch16-224-in21k' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
__UpperCAmelCase = field(
default=lowerCamelCase_ , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(lowerCamelCase_ )} , )
__UpperCAmelCase = field(
default=lowerCamelCase_ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__UpperCAmelCase = field(
default=lowerCamelCase_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
__UpperCAmelCase = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
__UpperCAmelCase = field(default=lowerCamelCase_ , metadata={'help': 'Name or path of preprocessor config.'} )
__UpperCAmelCase = field(
default=lowerCamelCase_ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
__UpperCAmelCase = field(
default=lowerCamelCase_ , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =torch.stack([example['pixel_values'] for example in examples] )
SCREAMING_SNAKE_CASE =torch.tensor([example['labels'] for example in examples] )
return {"pixel_values": pixel_values, "labels": labels}
def snake_case__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_image_classification', lowerCAmelCase_, lowerCAmelCase_ )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout )], )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
SCREAMING_SNAKE_CASE =training_args.get_process_log_level()
logger.setLevel(lowerCAmelCase_ )
transformers.utils.logging.set_verbosity(lowerCAmelCase_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
SCREAMING_SNAKE_CASE =None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
SCREAMING_SNAKE_CASE =get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
SCREAMING_SNAKE_CASE =load_dataset(
data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, task='image-classification', use_auth_token=True if model_args.use_auth_token else None, )
else:
SCREAMING_SNAKE_CASE ={}
if data_args.train_dir is not None:
SCREAMING_SNAKE_CASE =os.path.join(data_args.train_dir, '**' )
if data_args.validation_dir is not None:
SCREAMING_SNAKE_CASE =os.path.join(data_args.validation_dir, '**' )
SCREAMING_SNAKE_CASE =load_dataset(
'imagefolder', data_files=lowerCAmelCase_, cache_dir=model_args.cache_dir, task='image-classification', )
# If we don't have a validation split, split off a percentage of train as validation.
SCREAMING_SNAKE_CASE =None if 'validation' in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split, lowerCAmelCase_ ) and data_args.train_val_split > 0.0:
SCREAMING_SNAKE_CASE =dataset['train'].train_test_split(data_args.train_val_split )
SCREAMING_SNAKE_CASE =split['train']
SCREAMING_SNAKE_CASE =split['test']
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
SCREAMING_SNAKE_CASE =dataset['train'].features['labels'].names
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ={}, {}
for i, label in enumerate(lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE =str(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =label
# Load the accuracy metric from the datasets package
SCREAMING_SNAKE_CASE =evaluate.load('accuracy' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(lowerCAmelCase_ ):
return metric.compute(predictions=np.argmax(p.predictions, axis=1 ), references=p.label_ids )
SCREAMING_SNAKE_CASE =AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path, num_labels=len(lowerCAmelCase_ ), labelaid=lowerCAmelCase_, idalabel=lowerCAmelCase_, finetuning_task='image-classification', cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
SCREAMING_SNAKE_CASE =AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path ), config=lowerCAmelCase_, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )
SCREAMING_SNAKE_CASE =AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
SCREAMING_SNAKE_CASE =image_processor.size['shortest_edge']
else:
SCREAMING_SNAKE_CASE =(image_processor.size['height'], image_processor.size['width'])
SCREAMING_SNAKE_CASE =Normalize(mean=image_processor.image_mean, std=image_processor.image_std )
SCREAMING_SNAKE_CASE =Compose(
[
RandomResizedCrop(lowerCAmelCase_ ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
SCREAMING_SNAKE_CASE =Compose(
[
Resize(lowerCAmelCase_ ),
CenterCrop(lowerCAmelCase_ ),
ToTensor(),
normalize,
] )
def train_transforms(lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE =[
_train_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch['image']
]
return example_batch
def val_transforms(lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE =[_val_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch['image']]
return example_batch
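# The two pipelines above differ deliberately: training applies stochastic
# augmentation (random resized crop plus horizontal flip), while validation uses a
# deterministic resize and center crop so that evaluation is reproducible.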
if training_args.do_train:
if "train" not in dataset:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
SCREAMING_SNAKE_CASE =(
dataset['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(lowerCAmelCase_ )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
SCREAMING_SNAKE_CASE =(
dataset['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(lowerCAmelCase_ )
# Initialize our trainer
SCREAMING_SNAKE_CASE =Trainer(
model=lowerCAmelCase_, args=lowerCAmelCase_, train_dataset=dataset['train'] if training_args.do_train else None, eval_dataset=dataset['validation'] if training_args.do_eval else None, compute_metrics=lowerCAmelCase_, tokenizer=lowerCAmelCase_, data_collator=lowerCAmelCase_, )
# Training
if training_args.do_train:
SCREAMING_SNAKE_CASE =None
if training_args.resume_from_checkpoint is not None:
SCREAMING_SNAKE_CASE =training_args.resume_from_checkpoint
elif last_checkpoint is not None:
SCREAMING_SNAKE_CASE =last_checkpoint
SCREAMING_SNAKE_CASE =trainer.train(resume_from_checkpoint=lowerCAmelCase_ )
trainer.save_model()
trainer.log_metrics('train', train_result.metrics )
trainer.save_metrics('train', train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
SCREAMING_SNAKE_CASE =trainer.evaluate()
trainer.log_metrics('eval', lowerCAmelCase_ )
trainer.save_metrics('eval', lowerCAmelCase_ )
# Write model card and (optionally) push to hub
SCREAMING_SNAKE_CASE ={
'finetuned_from': model_args.model_name_or_path,
'tasks': 'image-classification',
'dataset': data_args.dataset_name,
'tags': ['image-classification', 'vision'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCAmelCase_ )
else:
trainer.create_model_card(**lowerCAmelCase_ )
if __name__ == "__main__":
main()
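# Hedged usage sketch (dataset name and paths illustrative):
#     python run_image_classification.py --dataset_name beans \
#         --output_dir ./image-classification-out --do_train --do_eval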
| 334 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self : Tuple ,snake_case : Optional[int] ,snake_case : Dict=13 ,snake_case : str=7 ,snake_case : Dict=True ,snake_case : List[Any]=True ,snake_case : Dict=False ,snake_case : int=True ,snake_case : Dict=99 ,snake_case : int=32 ,snake_case : List[str]=5 ,snake_case : Optional[Any]=4 ,snake_case : Tuple=64 ,snake_case : List[Any]="gelu" ,snake_case : str=0.1 ,snake_case : str=0.1 ,snake_case : List[str]=512 ,snake_case : List[str]=16 ,snake_case : str=2 ,snake_case : Dict=0.02 ,snake_case : Optional[int]=3 ,snake_case : int=4 ,snake_case : Any=None ,snake_case : Union[str, Any]=2 ,snake_case : List[Any]=2 ,snake_case : Optional[int]=2 ,snake_case : Dict=2 ,snake_case : List[str]=4 ,snake_case : int=1 ,):
SCREAMING_SNAKE_CASE =parent
SCREAMING_SNAKE_CASE =batch_size
SCREAMING_SNAKE_CASE =seq_length
SCREAMING_SNAKE_CASE =is_training
SCREAMING_SNAKE_CASE =use_input_mask
SCREAMING_SNAKE_CASE =use_token_type_ids
SCREAMING_SNAKE_CASE =use_labels
SCREAMING_SNAKE_CASE =vocab_size
SCREAMING_SNAKE_CASE =hidden_size
SCREAMING_SNAKE_CASE =num_hidden_layers
SCREAMING_SNAKE_CASE =num_attention_heads
SCREAMING_SNAKE_CASE =intermediate_size
SCREAMING_SNAKE_CASE =hidden_act
SCREAMING_SNAKE_CASE =hidden_dropout_prob
SCREAMING_SNAKE_CASE =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE =max_position_embeddings
SCREAMING_SNAKE_CASE =type_vocab_size
SCREAMING_SNAKE_CASE =type_sequence_label_size
SCREAMING_SNAKE_CASE =initializer_range
SCREAMING_SNAKE_CASE =num_labels
SCREAMING_SNAKE_CASE =num_choices
SCREAMING_SNAKE_CASE =scope
SCREAMING_SNAKE_CASE =q_groups
SCREAMING_SNAKE_CASE =k_groups
SCREAMING_SNAKE_CASE =v_groups
SCREAMING_SNAKE_CASE =post_attention_groups
SCREAMING_SNAKE_CASE =intermediate_groups
SCREAMING_SNAKE_CASE =output_groups
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
SCREAMING_SNAKE_CASE =None
if self.use_input_mask:
SCREAMING_SNAKE_CASE =random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE =None
SCREAMING_SNAKE_CASE =None
SCREAMING_SNAKE_CASE =None
if self.use_labels:
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] ,self.type_sequence_label_size )
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] ,self.num_choices )
SCREAMING_SNAKE_CASE =self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCAmelCase ( self : Optional[int] ):
return SqueezeBertConfig(
embedding_size=self.hidden_size ,vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,attention_probs_dropout_prob=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,q_groups=self.q_groups ,k_groups=self.k_groups ,v_groups=self.v_groups ,post_attention_groups=self.post_attention_groups ,intermediate_groups=self.intermediate_groups ,output_groups=self.output_groups ,)
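# Hedged note on the *_groups arguments above: SqueezeBERT replaces most dense
# layers with grouped 1D convolutions; q_groups/k_groups/v_groups,
# post_attention_groups, intermediate_groups and output_groups set the group count
# for each projection, and larger group counts mean fewer parameters and faster
# inference.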
def _lowerCAmelCase ( self : Dict ,snake_case : List[str] ,snake_case : Optional[Any] ,snake_case : List[str] ,snake_case : List[Any] ,snake_case : str ,snake_case : Union[str, Any] ):
SCREAMING_SNAKE_CASE =SqueezeBertModel(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case ,snake_case )
SCREAMING_SNAKE_CASE =model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self : Optional[int] ,snake_case : Optional[int] ,snake_case : Union[str, Any] ,snake_case : List[Any] ,snake_case : int ,snake_case : Any ,snake_case : Tuple ):
SCREAMING_SNAKE_CASE =SqueezeBertForMaskedLM(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case ,attention_mask=snake_case ,labels=snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCAmelCase ( self : Tuple ,snake_case : Union[str, Any] ,snake_case : Any ,snake_case : List[str] ,snake_case : List[Any] ,snake_case : Dict ,snake_case : Optional[Any] ):
SCREAMING_SNAKE_CASE =SqueezeBertForQuestionAnswering(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(
snake_case ,attention_mask=snake_case ,start_positions=snake_case ,end_positions=snake_case )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _lowerCAmelCase ( self : Optional[int] ,snake_case : Tuple ,snake_case : List[str] ,snake_case : List[str] ,snake_case : Any ,snake_case : Tuple ,snake_case : str ):
SCREAMING_SNAKE_CASE =self.num_labels
SCREAMING_SNAKE_CASE =SqueezeBertForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case ,attention_mask=snake_case ,labels=snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _lowerCAmelCase ( self : Optional[Any] ,snake_case : List[str] ,snake_case : List[str] ,snake_case : Tuple ,snake_case : Dict ,snake_case : str ,snake_case : Tuple ):
SCREAMING_SNAKE_CASE =self.num_labels
SCREAMING_SNAKE_CASE =SqueezeBertForTokenClassification(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case ,attention_mask=snake_case ,labels=snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _lowerCAmelCase ( self : List[str] ,snake_case : Dict ,snake_case : str ,snake_case : Union[str, Any] ,snake_case : Union[str, Any] ,snake_case : Any ,snake_case : Union[str, Any] ):
SCREAMING_SNAKE_CASE =self.num_choices
SCREAMING_SNAKE_CASE =SqueezeBertForMultipleChoice(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
SCREAMING_SNAKE_CASE =input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
SCREAMING_SNAKE_CASE =model(
snake_case ,attention_mask=snake_case ,labels=snake_case ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def _lowerCAmelCase ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) =config_and_inputs
SCREAMING_SNAKE_CASE ={'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': SqueezeBertModel,
            'fill-mask': SqueezeBertForMaskedLM,
            'question-answering': SqueezeBertForQuestionAnswering,
            'text-classification': SqueezeBertForSequenceClassification,
            'token-classification': SqueezeBertForTokenClassification,
            'zero-shot': SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False

    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @slow
    def test_classification_inference(self):
        model = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli')

        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
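        # The hard-coded tensor is a regression fingerprint: the MNLI checkpoint
        # emits 3 logits (one per MNLI class), and allclose with atol=1e-4 guards
        # against silent numerical drift across releases.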
| 334 | 1 |
"""simple docstring"""
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser('Transformers CLI tool', usage='transformers-cli <command> [<args>]')
    commands_parser = parser.add_subparsers(help='transformers-cli command helpers')

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, 'func'):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
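# Dispatch pattern: each Command class's register_subcommand attaches its own
# subparser and sets a `func` default, so `args.func(args)` above instantiates
# the selected command (e.g. `transformers-cli env` runs EnvironmentCommand).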
if __name__ == "__main__":
    main()
| 351 |
"""simple docstring"""
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all primes below max_number, found with a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]
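# The two-pointer count below relies on the sieve above: for each prime p at the
# left index, every prime q with p <= q and p * q < max_number contributes one
# distinct semiprime, so the right pointer only ever moves leftward as p grows.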
def solution(max_number: int = 10**8) -> int:
    """Count the composites below max_number that have exactly two prime factors."""
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1

    return semiprimes_count
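# Worked check: below 30 the semiprimes are 4, 6, 9, 10, 14, 15, 21, 22, 25 and
# 26, so solution(30) returns 10.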
if __name__ == "__main__":
    print(f'''{solution() = }''')
| 263 | 0 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
UpperCAmelCase_ : int = {
"""speechbrain/m-ctc-t-large""": """https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json""",
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class MCTCTConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "mctct"

    def __init__(
        self,
        vocab_size=8065,
        hidden_size=1536,
        num_hidden_layers=36,
        intermediate_size=6144,
        num_attention_heads=4,
        attention_head_dim=384,
        max_position_embeddings=920,
        layer_norm_eps=1e-5,
        layerdrop=0.3,
        hidden_act="relu",
        initializer_range=0.02,
        hidden_dropout_prob=0.3,
        attention_probs_dropout_prob=0.3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        conv_glu_dim=1,
        conv_dropout=0.3,
        num_conv_layers=1,
        conv_kernel=(7,),
        conv_stride=(3,),
        input_feat_per_channel=80,
        input_channels=1,
        conv_channels=None,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)

        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                '''Configuration for convolutional module is incorrect. '''
                '''It is required that `len(config.conv_kernel)` == `config.num_conv_layers` '''
                f'but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, '
                f'`config.num_conv_layers = {self.num_conv_layers}`.'
            )
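# Illustrative usage (assuming the surrounding transformers-style package layout):
#   config = MCTCTConfig(num_conv_layers=2, conv_kernel=(7, 7), conv_stride=(3, 3))
#   config.save_pretrained("./mctct-config")  # inherited from PretrainedConfig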
| 91 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_mmbt''': ['''MMBTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_mmbt'''] = ['''MMBTForClassification''', '''MMBTModel''', '''ModalEmbeddings''']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 235 | 0 |
"""simple docstring"""
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
lowerCAmelCase_ = {
'169M': 12,
'430M': 24,
'1B5': 24,
'3B': 32,
'7B': 32,
'14B': 40,
}
lowerCAmelCase_ = {
'169M': 768,
'430M': 1_024,
'1B5': 2_048,
'3B': 2_560,
'7B': 4_096,
'14B': 5_120,
}
def __UpperCAmelCase ( __lowerCamelCase ) -> Union[str, Any]:
lowercase__ : Tuple = list(state_dict.keys() )
for name in state_dict_keys:
lowercase__ : Optional[Any] = state_dict.pop(__lowerCamelCase )
# emb -> embedding
if name.startswith('''emb.''' ):
lowercase__ : int = name.replace('''emb.''' , '''embeddings.''' )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith('''blocks.0.ln0''' ):
lowercase__ : int = name.replace('''blocks.0.ln0''' , '''blocks.0.pre_ln''' )
# att -> attention
lowercase__ : List[str] = re.sub(r'''blocks\.(\d+)\.att''' , r'''blocks.\1.attention''' , __lowerCamelCase )
# ffn -> feed_forward
lowercase__ : Optional[Any] = re.sub(r'''blocks\.(\d+)\.ffn''' , r'''blocks.\1.feed_forward''' , __lowerCamelCase )
# time_mix_k -> time_mix_key and reshape
if name.endswith('''.time_mix_k''' ):
lowercase__ : Dict = name.replace('''.time_mix_k''' , '''.time_mix_key''' )
# time_mix_v -> time_mix_value and reshape
if name.endswith('''.time_mix_v''' ):
lowercase__ : Any = name.replace('''.time_mix_v''' , '''.time_mix_value''' )
# time_mix_r -> time_mix_receptance
if name.endswith('''.time_mix_r''' ):
lowercase__ : Optional[Any] = name.replace('''.time_mix_r''' , '''.time_mix_receptance''' )
if name != "head.weight":
lowercase__ : Dict = '''rwkv.''' + name
lowercase__ : Union[str, Any] = weight
return state_dict
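# Example of the renaming above (illustrative): "blocks.3.att.key.weight" becomes
# "rwkv.blocks.3.attention.key.weight" -- "att" expands to "attention" via the
# regex, and the "rwkv." prefix is added to every key except "head.weight".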
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=False , __lowerCamelCase=None ) -> List[str]:
# 1. If possible, build the tokenizer.
if tokenizer_file is None:
print('''No `--tokenizer_file` provided, we will use the default tokenizer.''' )
lowercase__ : List[Any] = 5_02_77
lowercase__ : str = AutoTokenizer.from_pretrained('''EleutherAI/gpt-neox-20b''' )
else:
lowercase__ : Optional[int] = PreTrainedTokenizerFast(tokenizer_file=__lowerCamelCase )
lowercase__ : Dict = len(__lowerCamelCase )
tokenizer.save_pretrained(__lowerCamelCase )
# 2. Build the config
lowercase__ : Optional[Any] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
lowercase__ : Union[str, Any] = candidate
break
if size is None:
raise ValueError('''Could not infer the size, please provide it with the `--size` argument.''' )
if size not in possible_sizes:
raise ValueError(f"""`size` should be one of {possible_sizes}, got {size}.""" )
lowercase__ : Optional[int] = RwkvConfig(
vocab_size=__lowerCamelCase , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(__lowerCamelCase )
# 3. Download model file then convert state_dict
lowercase__ : Dict = hf_hub_download(__lowerCamelCase , __lowerCamelCase )
lowercase__ : Dict = torch.load(__lowerCamelCase , map_location='''cpu''' )
lowercase__ : Union[str, Any] = convert_state_dict(__lowerCamelCase )
# 4. Split in shards and save
lowercase__ , lowercase__ : Tuple = shard_checkpoint(__lowerCamelCase )
for shard_file, shard in shards.items():
torch.save(__lowerCamelCase , os.path.join(__lowerCamelCase , __lowerCamelCase ) )
if index is not None:
lowercase__ : Any = os.path.join(__lowerCamelCase , __lowerCamelCase )
# Save the index as well
with open(__lowerCamelCase , '''w''' , encoding='''utf-8''' ) as f:
lowercase__ : Union[str, Any] = json.dumps(__lowerCamelCase , indent=2 , sort_keys=__lowerCamelCase ) + '''\n'''
f.write(__lowerCamelCase )
# 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
print(
'''Cleaning up shards. This may error with an OOM error, if this is the case don\'t worry you still have converted the model.''' )
lowercase__ : str = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
lowercase__ : List[Any] = torch.load(os.path.join(__lowerCamelCase , __lowerCamelCase ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(__lowerCamelCase , __lowerCamelCase ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError('''Please provide a `model_name` to push the model to the Hub.''' )
lowercase__ : List[str] = AutoModelForCausalLM.from_pretrained(__lowerCamelCase )
model.push_to_hub(__lowerCamelCase , max_shard_size='''2GB''' )
tokenizer.push_to_hub(__lowerCamelCase )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.'
)
parser.add_argument(
'--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.'
)
parser.add_argument(
'--output_dir', default=None, type=str, required=True, help='Where to save the converted model.'
)
parser.add_argument(
'--tokenizer_file',
default=None,
type=str,
help='Path to the tokenizer file to use (if not provided, only the model is converted).',
)
parser.add_argument(
'--size',
default=None,
type=str,
help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Push to the Hub the converted model.',
)
parser.add_argument(
'--model_name',
default=None,
type=str,
help='Name of the pushed model on the Hub, including the username / organization.',
)
lowerCAmelCase_ = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 302 |
"""simple docstring"""
import math
def decimal_to_octal(num: int) -> str:
    """Convert a decimal number to its octal representation."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"""0o{int(octal)}"""
def main() -> None:
    print('''\n2 in octal is:''')
    print(decimal_to_octal(2))  # = 2
    print('''\n8 in octal is:''')
    print(decimal_to_octal(8))  # = 10
    print('''\n65 in octal is:''')
    print(decimal_to_octal(65))  # = 101
    print('''\n216 in octal is:''')
    print(decimal_to_octal(216))  # = 330
    print('''\n512 in octal is:''')
    print(decimal_to_octal(512))  # = 1000
    print('''\n''')


if __name__ == "__main__":
    main()
| 302 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Tuple = logging.get_logger(__name__)
def __lowercase ( _a ):
snake_case_ : Dict = DPTConfig()
if "large" in checkpoint_url:
snake_case_ : Optional[int] = 1_024
snake_case_ : str = 4_096
snake_case_ : Any = 24
snake_case_ : Any = 16
snake_case_ : Tuple = [5, 11, 17, 23]
snake_case_ : Any = [256, 512, 1_024, 1_024]
snake_case_ : Optional[Any] = (1, 384, 384)
if "ade" in checkpoint_url:
snake_case_ : Union[str, Any] = True
snake_case_ : int = 150
snake_case_ : Dict = '''huggingface/label-files'''
snake_case_ : Any = '''ade20k-id2label.json'''
snake_case_ : int = json.load(open(cached_download(hf_hub_url(_a , _a , repo_type='''dataset''' ) ) , '''r''' ) )
snake_case_ : Dict = {int(_a ): v for k, v in idalabel.items()}
snake_case_ : int = idalabel
snake_case_ : Any = {v: k for k, v in idalabel.items()}
snake_case_ : Union[str, Any] = [1, 150, 480, 480]
return config, expected_shape
def __lowercase ( _a ):
snake_case_ : List[Any] = ['''pretrained.model.head.weight''', '''pretrained.model.head.bias''']
for k in ignore_keys:
state_dict.pop(_a , _a )
def __lowercase ( _a ):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
snake_case_ : Dict = name.replace('''pretrained.model''' , '''dpt.encoder''' )
if "pretrained.model" in name:
snake_case_ : Tuple = name.replace('''pretrained.model''' , '''dpt.embeddings''' )
if "patch_embed" in name:
snake_case_ : int = name.replace('''patch_embed''' , '''patch_embeddings''' )
if "pos_embed" in name:
snake_case_ : Any = name.replace('''pos_embed''' , '''position_embeddings''' )
if "attn.proj" in name:
snake_case_ : Optional[int] = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "proj" in name and "project" not in name:
snake_case_ : Any = name.replace('''proj''' , '''projection''' )
if "blocks" in name:
snake_case_ : int = name.replace('''blocks''' , '''layer''' )
if "mlp.fc1" in name:
snake_case_ : Optional[int] = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
snake_case_ : List[Any] = name.replace('''mlp.fc2''' , '''output.dense''' )
if "norm1" in name:
snake_case_ : Any = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
snake_case_ : str = name.replace('''norm2''' , '''layernorm_after''' )
if "scratch.output_conv" in name:
snake_case_ : List[Any] = name.replace('''scratch.output_conv''' , '''head''' )
if "scratch" in name:
snake_case_ : Optional[Any] = name.replace('''scratch''' , '''neck''' )
if "layer1_rn" in name:
snake_case_ : str = name.replace('''layer1_rn''' , '''convs.0''' )
if "layer2_rn" in name:
snake_case_ : Dict = name.replace('''layer2_rn''' , '''convs.1''' )
if "layer3_rn" in name:
snake_case_ : Dict = name.replace('''layer3_rn''' , '''convs.2''' )
if "layer4_rn" in name:
snake_case_ : Optional[int] = name.replace('''layer4_rn''' , '''convs.3''' )
if "refinenet" in name:
snake_case_ : str = int(name[len('''neck.refinenet''' ) : len('''neck.refinenet''' ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
snake_case_ : int = name.replace(f"refinenet{layer_idx}" , f"fusion_stage.layers.{abs(layer_idx-4 )}" )
if "out_conv" in name:
snake_case_ : Tuple = name.replace('''out_conv''' , '''projection''' )
if "resConfUnit1" in name:
snake_case_ : str = name.replace('''resConfUnit1''' , '''residual_layer1''' )
if "resConfUnit2" in name:
snake_case_ : List[str] = name.replace('''resConfUnit2''' , '''residual_layer2''' )
if "conv1" in name:
snake_case_ : Any = name.replace('''conv1''' , '''convolution1''' )
if "conv2" in name:
snake_case_ : Optional[Any] = name.replace('''conv2''' , '''convolution2''' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
snake_case_ : List[Any] = name.replace('''pretrained.act_postprocess1.0.project.0''' , '''neck.reassemble_stage.readout_projects.0.0''' )
if "pretrained.act_postprocess2.0.project.0" in name:
snake_case_ : str = name.replace('''pretrained.act_postprocess2.0.project.0''' , '''neck.reassemble_stage.readout_projects.1.0''' )
if "pretrained.act_postprocess3.0.project.0" in name:
snake_case_ : Any = name.replace('''pretrained.act_postprocess3.0.project.0''' , '''neck.reassemble_stage.readout_projects.2.0''' )
if "pretrained.act_postprocess4.0.project.0" in name:
snake_case_ : Any = name.replace('''pretrained.act_postprocess4.0.project.0''' , '''neck.reassemble_stage.readout_projects.3.0''' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
snake_case_ : Any = name.replace('''pretrained.act_postprocess1.3''' , '''neck.reassemble_stage.layers.0.projection''' )
if "pretrained.act_postprocess1.4" in name:
snake_case_ : Optional[int] = name.replace('''pretrained.act_postprocess1.4''' , '''neck.reassemble_stage.layers.0.resize''' )
if "pretrained.act_postprocess2.3" in name:
snake_case_ : Optional[int] = name.replace('''pretrained.act_postprocess2.3''' , '''neck.reassemble_stage.layers.1.projection''' )
if "pretrained.act_postprocess2.4" in name:
snake_case_ : List[Any] = name.replace('''pretrained.act_postprocess2.4''' , '''neck.reassemble_stage.layers.1.resize''' )
if "pretrained.act_postprocess3.3" in name:
snake_case_ : Any = name.replace('''pretrained.act_postprocess3.3''' , '''neck.reassemble_stage.layers.2.projection''' )
if "pretrained.act_postprocess4.3" in name:
snake_case_ : Tuple = name.replace('''pretrained.act_postprocess4.3''' , '''neck.reassemble_stage.layers.3.projection''' )
if "pretrained.act_postprocess4.4" in name:
snake_case_ : Union[str, Any] = name.replace('''pretrained.act_postprocess4.4''' , '''neck.reassemble_stage.layers.3.resize''' )
if "pretrained" in name:
snake_case_ : Optional[Any] = name.replace('''pretrained''' , '''dpt''' )
if "bn" in name:
snake_case_ : Any = name.replace('''bn''' , '''batch_norm''' )
if "head" in name:
snake_case_ : int = name.replace('''head''' , '''head.head''' )
if "encoder.norm" in name:
snake_case_ : int = name.replace('''encoder.norm''' , '''layernorm''' )
if "auxlayer" in name:
snake_case_ : Optional[Any] = name.replace('''auxlayer''' , '''auxiliary_head.head''' )
return name
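# Example of the full rename chain above (illustrative):
# "pretrained.model.blocks.0.attn.proj.weight"
#   -> "dpt.encoder.layer.0.attention.output.dense.weight"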
def __lowercase ( _a , _a ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case_ : List[str] = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight" )
snake_case_ : Union[str, Any] = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
snake_case_ : Any = in_proj_weight[: config.hidden_size, :]
snake_case_ : Optional[int] = in_proj_bias[: config.hidden_size]
snake_case_ : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case_ : Optional[int] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case_ : str = in_proj_weight[
-config.hidden_size :, :
]
snake_case_ : List[Any] = in_proj_bias[-config.hidden_size :]
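# The fused qkv matrix has shape (3 * hidden_size, hidden_size): the three equal
# slices taken above are the query ([:h]), key ([h:2h]) and value ([-h:]) blocks,
# in that order.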
def __lowercase ( ):
snake_case_ : int = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
snake_case_ : str = Image.open(requests.get(_a , stream=_a ).raw )
return im
@torch.no_grad()
def __lowercase ( _a , _a , _a , _a ):
snake_case_, snake_case_ : Dict = get_dpt_config(_a )
# load original state_dict from URL
snake_case_ : Dict = torch.hub.load_state_dict_from_url(_a , map_location='''cpu''' )
# remove certain keys
remove_ignore_keys_(_a )
# rename keys
for key in state_dict.copy().keys():
snake_case_ : str = state_dict.pop(_a )
snake_case_ : Tuple = val
# read in qkv matrices
read_in_q_k_v(_a , _a )
# load HuggingFace model
snake_case_ : Tuple = DPTForSemanticSegmentation(_a ) if '''ade''' in checkpoint_url else DPTForDepthEstimation(_a )
model.load_state_dict(_a )
model.eval()
# Check outputs on an image
snake_case_ : Optional[int] = 480 if '''ade''' in checkpoint_url else 384
snake_case_ : Optional[int] = DPTImageProcessor(size=_a )
snake_case_ : Dict = prepare_img()
snake_case_ : int = image_processor(_a , return_tensors='''pt''' )
# forward pass
snake_case_ : Union[str, Any] = model(**_a ).logits if '''ade''' in checkpoint_url else model(**_a ).predicted_depth
# Assert logits
snake_case_ : str = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]] )
if "ade" in checkpoint_url:
snake_case_ : Optional[Any] = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]] )
assert outputs.shape == torch.Size(_a )
assert (
torch.allclose(outputs[0, 0, :3, :3] , _a , atol=1E-4 )
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , _a )
)
Path(_a ).mkdir(exist_ok=_a )
print(f"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(_a )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(_a )
if push_to_hub:
print('''Pushing model to hub...''' )
model.push_to_hub(
repo_path_or_name=Path(_a , _a ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=_a , )
image_processor.push_to_hub(
repo_path_or_name=Path(_a , _a ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=_a , )
if __name__ == "__main__":
lowercase__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
lowercase__ : str = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 264 |
"""simple docstring"""
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def __lowercase ( _a="" ):
snake_case_ : List[str] = tempfile.mkdtemp()
return os.path.join(_a , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class _UpperCAmelCase ( unittest.TestCase):
def _snake_case ( self : str ):
snake_case_ : int = torch.rand(12 , dtype=torch.floataa ) - 0.5
snake_case_ : Optional[int] = AgentAudio(lowercase_ )
snake_case_ : List[str] = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(lowercase_ , agent_type.to_raw() , atol=1E-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(lowercase_ ) )
# Ensure that the file contains the same value as the original tensor
snake_case_, snake_case_ : int = sf.read(lowercase_ )
self.assertTrue(torch.allclose(lowercase_ , torch.tensor(lowercase_ ) , atol=1E-4 ) )
def _snake_case ( self : Optional[int] ):
snake_case_ : Any = torch.rand(12 , dtype=torch.floataa ) - 0.5
snake_case_ : List[str] = get_new_path(suffix='''.wav''' )
sf.write(lowercase_ , lowercase_ , 16000 )
snake_case_ : Tuple = AgentAudio(lowercase_ )
self.assertTrue(torch.allclose(lowercase_ , agent_type.to_raw() , atol=1E-4 ) )
self.assertEqual(agent_type.to_string() , lowercase_ )
@require_vision
@require_torch
class _UpperCAmelCase ( unittest.TestCase):
def _snake_case ( self : Tuple ):
snake_case_ : List[Any] = torch.randint(0 , 256 , (64, 64, 3) )
snake_case_ : str = AgentImage(lowercase_ )
snake_case_ : Union[str, Any] = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(lowercase_ , agent_type._tensor , atol=1E-4 ) )
self.assertIsInstance(agent_type.to_raw() , Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowercase_ ) )
def _snake_case ( self : str ):
snake_case_ : Any = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png'''
snake_case_ : Optional[int] = Image.open(lowercase_ )
snake_case_ : Tuple = AgentImage(lowercase_ )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowercase_ ) )
def _snake_case ( self : str ):
snake_case_ : int = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png'''
snake_case_ : Dict = Image.open(lowercase_ )
snake_case_ : List[str] = AgentImage(lowercase_ )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowercase_ ) )
class _UpperCAmelCase ( unittest.TestCase):
def _snake_case ( self : Any ):
snake_case_ : Tuple = '''Hey!'''
snake_case_ : Optional[Any] = AgentText(lowercase_ )
self.assertEqual(lowercase_ , agent_type.to_string() )
self.assertEqual(lowercase_ , agent_type.to_raw() )
self.assertEqual(lowercase_ , lowercase_ )
| 264 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
) -> None:
    """simple docstring"""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"""Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"""
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"""Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"""
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
) -> None:
    """simple docstring"""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["""python"""])
_register_formatter(ArrowFormatter, """arrow""", aliases=["""pa""", """pyarrow"""])
_register_formatter(NumpyFormatter, """numpy""", aliases=["""np"""])
_register_formatter(PandasFormatter, """pandas""", aliases=["""pd"""])
_register_formatter(CustomFormatter, """custom""")
if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter

    _register_formatter(TorchFormatter, """torch""", aliases=["""pt""", """pytorch"""])
else:
    _torch_error = ValueError("""PyTorch needs to be installed to be able to return PyTorch tensors.""")
    _register_unavailable_formatter(_torch_error, """torch""", aliases=["""pt""", """pytorch"""])

if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter

    _register_formatter(TFFormatter, """tensorflow""", aliases=["""tf"""])
else:
    _tf_error = ValueError("""Tensorflow needs to be installed to be able to return Tensorflow tensors.""")
    _register_unavailable_formatter(_tf_error, """tensorflow""", aliases=["""tf"""])

if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter

    _register_formatter(JaxFormatter, """jax""", aliases=[])
else:
    _jax_error = ValueError("""JAX needs to be installed to be able to return JAX arrays.""")
    _register_unavailable_formatter(_jax_error, """jax""", aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """simple docstring"""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type
def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """simple docstring"""
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"""Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"""
        )
| 361 |
"""simple docstring"""
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """simple docstring"""
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError('''could not find root in given interval.''')
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    """simple docstring"""
    return x**3 - 2 * x - 5
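# Sanity check: f(2) = -1 and f(3) = 16 bracket a sign change, so bisection on
# [1, 1000] converges to the real root of x**3 - 2x - 5 (about 2.0945515).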
if __name__ == "__main__":
    print(bisection(f, 1, 1000))

    import doctest

    doctest.testmod()
| 24 | 0 |
"""simple docstring"""
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
lowerCAmelCase : Optional[int] = logging.getLogger(__name__)
lowerCAmelCase : Optional[int] = tf.data.AUTOTUNE
def a__ ( ) -> int:
lowerCamelCase = argparse.ArgumentParser(description="""Train a masked language model on TPU.""" )
parser.add_argument(
"""--pretrained_model_config""" , type=__A , default="""roberta-base""" , help="""The model config to use. Note that we don\'t copy the model\'s weights, only the config!""" , )
parser.add_argument(
"""--tokenizer""" , type=__A , default="""unigram-tokenizer-wikitext""" , help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model\'s vocab size.""" , )
parser.add_argument(
"""--per_replica_batch_size""" , type=__A , default=8 , help="""Batch size per TPU core.""" , )
parser.add_argument(
"""--no_tpu""" , action="""store_true""" , help="""If set, run on CPU and don\'t try to initialize a TPU. Useful for debugging on non-TPU instances.""" , )
parser.add_argument(
"""--tpu_name""" , type=__A , help="""Name of TPU resource to initialize. Should be blank on Colab, and \'local\' on TPU VMs.""" , default="""local""" , )
parser.add_argument(
"""--tpu_zone""" , type=__A , help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""" , )
parser.add_argument(
"""--gcp_project""" , type=__A , help="""Google cloud project name. Only used for non-Colab TPU nodes.""" )
parser.add_argument(
"""--bfloat16""" , action="""store_true""" , help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""" , )
parser.add_argument(
"""--train_dataset""" , type=__A , help="""Path to training dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""" , )
parser.add_argument(
"""--shuffle_buffer_size""" , type=__A , default=2**18 , help="""Size of the shuffle buffer (in samples)""" , )
parser.add_argument(
"""--eval_dataset""" , type=__A , help="""Path to evaluation dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""" , )
parser.add_argument(
"""--num_epochs""" , type=__A , default=1 , help="""Number of epochs to train for.""" , )
parser.add_argument(
"""--learning_rate""" , type=__A , default=1E-4 , help="""Learning rate to use for training.""" , )
parser.add_argument(
"""--weight_decay_rate""" , type=__A , default=1E-3 , help="""Weight decay rate to use for training.""" , )
parser.add_argument(
"""--max_length""" , type=__A , default=5_12 , help="""Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py""" , )
parser.add_argument(
"""--mlm_probability""" , type=__A , default=0.15 , help="""Fraction of tokens to mask during training.""" , )
parser.add_argument("""--output_dir""" , type=__A , required=__A , help="""Path to save model checkpoints to.""" )
parser.add_argument("""--hub_model_id""" , type=__A , help="""Model ID to upload to on the Hugging Face Hub.""" )
lowerCamelCase = parser.parse_args()
return args
def a__ ( snake_case__ ) -> Dict:
try:
if args.tpu_name:
lowerCamelCase = tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
else:
lowerCamelCase = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
"""Couldn\'t connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """
"""--gcp_project. When running on a TPU VM, use --tpu_name local.""" )
tf.config.experimental_connect_to_cluster(__A )
tf.tpu.experimental.initialize_tpu_system(__A )
return tpu
def a__ ( snake_case__ ) -> int:
lowerCamelCase = 0
for file in file_list:
lowerCamelCase = file.split("""/""" )[-1]
lowerCamelCase = re.search(R"""-\d+-(\d+)\.tfrecord""" , __A ).group(1 )
lowerCamelCase = int(__A )
num_samples += sample_count
return num_samples
def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__=None ) -> int:
lowerCamelCase = count_samples(__A )
lowerCamelCase = tf.data.Dataset.from_tensor_slices(__A )
if shuffle:
lowerCamelCase = dataset.shuffle(len(__A ) )
lowerCamelCase = tf.data.TFRecordDataset(__A , num_parallel_reads=__A )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
lowerCamelCase = dataset.apply(tf.data.experimental.assert_cardinality(__A ) )
lowerCamelCase = dataset.map(__A , num_parallel_calls=__A )
if shuffle:
assert shuffle_buffer_size is not None
lowerCamelCase = dataset.shuffle(args.shuffle_buffer_size )
lowerCamelCase = dataset.batch(__A , drop_remainder=__A )
lowerCamelCase = dataset.map(__A , num_parallel_calls=__A )
lowerCamelCase = dataset.prefetch(__A )
return dataset
def a__ ( snake_case__ ) -> str:
if not args.no_tpu:
lowerCamelCase = initialize_tpu(__A )
lowerCamelCase = tf.distribute.TPUStrategy(__A )
else:
lowerCamelCase = tf.distribute.OneDeviceStrategy(device="""/gpu:0""" )
if args.bfloat16:
tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" )
lowerCamelCase = AutoTokenizer.from_pretrained(args.tokenizer )
lowerCamelCase = AutoConfig.from_pretrained(args.pretrained_model_config )
lowerCamelCase = tokenizer.vocab_size
lowerCamelCase = tf.io.gfile.glob(os.path.join(args.train_dataset , """*.tfrecord""" ) )
if not training_records:
raise ValueError(F'No .tfrecord files found in {args.train_dataset}.' )
lowerCamelCase = tf.io.gfile.glob(os.path.join(args.eval_dataset , """*.tfrecord""" ) )
if not eval_records:
raise ValueError(F'No .tfrecord files found in {args.eval_dataset}.' )
lowerCamelCase = count_samples(__A )
lowerCamelCase = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
lowerCamelCase = steps_per_epoch * args.num_epochs
with strategy.scope():
lowerCamelCase = TFAutoModelForMaskedLM.from_config(__A )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
lowerCamelCase , lowerCamelCase = create_optimizer(
num_train_steps=__A , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=__A , metrics=["""accuracy"""] )
def decode_fn(snake_case__ ):
lowerCamelCase = {
"""input_ids""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
"""attention_mask""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
}
return tf.io.parse_single_example(__A , __A )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
lowerCamelCase = DataCollatorForLanguageModeling(
tokenizer=__A , mlm_probability=args.mlm_probability , mlm=__A , return_tensors="""tf""" )
def mask_with_collator(snake_case__ ):
# TF really needs an isin() function
lowerCamelCase = (
~tf.cast(batch["""attention_mask"""] , tf.bool )
| (batch["""input_ids"""] == tokenizer.cls_token_id)
| (batch["""input_ids"""] == tokenizer.sep_token_id)
)
lowerCamelCase , lowerCamelCase = data_collator.tf_mask_tokens(
batch["""input_ids"""] , vocab_size=len(__A ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=__A , )
return batch
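# Note: the special-tokens mask built above marks padding, CLS and SEP positions
# so that tf_mask_tokens never selects them for masking; all other positions are
# candidates at the configured mlm_probability.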
lowerCamelCase = args.per_replica_batch_size * strategy.num_replicas_in_sync
lowerCamelCase = prepare_dataset(
__A , decode_fn=__A , mask_fn=__A , batch_size=__A , shuffle=__A , shuffle_buffer_size=args.shuffle_buffer_size , )
lowerCamelCase = prepare_dataset(
__A , decode_fn=__A , mask_fn=__A , batch_size=__A , shuffle=__A , )
lowerCamelCase = []
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=__A ) )
model.fit(
__A , validation_data=__A , epochs=args.num_epochs , callbacks=__A , )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
lowerCAmelCase : Any = parse_args()
main(args)
| 291 |
'''simple docstring'''
def solution(n: int = 100) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
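# Worked example: for n = 10 the sum of squares is 385 and the square of the sum
# is 55**2 = 3025, so solution(10) returns 3025 - 385 = 2640.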
if __name__ == "__main__":
    print(f'''{solution() = }''')
| 42 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_bridgetower": [
"BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BridgeTowerConfig",
"BridgeTowerTextConfig",
"BridgeTowerVisionConfig",
],
"processing_bridgetower": ["BridgeTowerProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[int] = ["BridgeTowerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_bridgetower"] = [
"BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST",
"BridgeTowerForContrastiveLearning",
"BridgeTowerForImageAndTextRetrieval",
"BridgeTowerForMaskedLM",
"BridgeTowerModel",
"BridgeTowerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
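# Pattern note: at import time the module replaces itself with a _LazyModule that
# resolves the names listed in _import_structure on first attribute access, so
# importing the package stays cheap until a class is actually used (the
# TYPE_CHECKING branch keeps static type checkers and IDEs informed).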
| 368 |
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module_params(module):
    """Disable gradient updates for every parameter of `module`."""
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    """Pick the best available torch device, warning about flaky MPS support."""
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = 'mps'
    if device == "mps":
        print(
            'WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'
            ' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'
            ' with generations.'
        )
    return device


def show_image(image):
    """Display an image with the axis ticks hidden."""
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    """Return the current wall-clock time as HH:MM:SS."""
    current_time = datetime.now()
    timestamp = current_time.strftime('%H:%M:%S')
    return timestamp
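# Illustrative usage (names assumed from the sketch above, `model` hypothetical):
#   device = get_device()
#   model.to(device)
#   freeze_module_params(model)  # the model's parameters stop receiving gradients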
| 197 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_UpperCAmelCase : Optional[Any] = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_unispeech"] = [
"UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
"UniSpeechForCTC",
"UniSpeechForPreTraining",
"UniSpeechForSequenceClassification",
"UniSpeechModel",
"UniSpeechPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 236 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_autoformer""": [
"""AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AutoformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_autoformer"""] = [
"""AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AutoformerForPrediction""",
"""AutoformerModel""",
"""AutoformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 327 | 0 |
"""simple docstring"""
from math import sqrt
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
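# Worked check: the first six primes are 2, 3, 5, 7, 11 and 13, so solution(6)
# returns 13; the default solution(10001) yields the 10001st prime.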
if __name__ == "__main__":
    print(f"""{solution() = }""")
| 73 |
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a force of given magnitude at a given angle into x and y components."""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1) -> bool:
    """Check whether the net moment of the force system is (approximately) zero."""
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps
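# Why cross(location, forces): each row pairs a force with its application point,
# so the 2D cross product yields that force's moment about the origin; static
# equilibrium of moments requires the sum to vanish (within eps).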
if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
| 73 | 1 |
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
__lowerCamelCase : List[str] = logging.get_logger(__name__)
__lowerCamelCase : List[Any] = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
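# Reading the table: fairseq parameter paths on the left map to HF WavLM paths on
# the right, with "*" standing in for the encoder layer index, which is filled in
# per-parameter during conversion.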
__lowerCamelCase : str = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
for attribute in key.split("." ):
UpperCamelCase : Dict = getattr(_lowerCAmelCase , _lowerCAmelCase )
if weight_type is not None:
UpperCamelCase : Union[str, Any] = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape
else:
UpperCamelCase : Tuple = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
UpperCamelCase : int = value
elif weight_type == "weight_g":
UpperCamelCase : Optional[int] = value
elif weight_type == "weight_v":
UpperCamelCase : str = value
elif weight_type == "bias":
UpperCamelCase : Union[str, Any] = value
else:
UpperCamelCase : List[str] = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> str:
UpperCamelCase : Optional[int] = []
UpperCamelCase : Optional[int] = fairseq_model.state_dict()
UpperCamelCase : Optional[Any] = hf_model.feature_extractor
for name, value in fairseq_dict.items():
UpperCamelCase : Dict = False
if "conv_layers" in name:
load_conv_layer(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == "group" , )
UpperCamelCase : Any = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
UpperCamelCase : Dict = True
if "*" in mapped_key:
UpperCamelCase : Any = name.split(_lowerCAmelCase )[0].split("." )[-2]
UpperCamelCase : Optional[int] = mapped_key.replace("*" , _lowerCAmelCase )
if "weight_g" in name:
UpperCamelCase : List[str] = "weight_g"
elif "weight_v" in name:
UpperCamelCase : str = "weight_v"
elif "bias" in name and "relative_attention_bias" not in name:
UpperCamelCase : Optional[Any] = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCamelCase : int = "weight"
else:
UpperCamelCase : str = None
set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
continue
if not is_used:
unused_weights.append(_lowerCAmelCase )
logger.warning(F"""Unused weights: {unused_weights}""" )
def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Tuple:
UpperCamelCase : str = full_name.split("conv_layers." )[-1]
UpperCamelCase : List[Any] = name.split("." )
UpperCamelCase : Union[str, Any] = int(items[0] )
UpperCamelCase : Tuple = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
UpperCamelCase : List[Any] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
UpperCamelCase : str = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
UpperCamelCase : Any = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
UpperCamelCase : Optional[Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_lowerCAmelCase )
@torch.no_grad()
def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None ) -> str:
# load the pre-trained checkpoint
UpperCamelCase : Tuple = torch.load(_lowerCAmelCase )
UpperCamelCase : List[Any] = WavLMConfigOrig(checkpoint["cfg"] )
UpperCamelCase : Any = WavLMOrig(_lowerCAmelCase )
model.load_state_dict(checkpoint["model"] )
model.eval()
if config_path is not None:
UpperCamelCase : Dict = WavLMConfig.from_pretrained(_lowerCAmelCase )
else:
UpperCamelCase : Tuple = WavLMConfig()
UpperCamelCase : Any = WavLMModel(_lowerCAmelCase )
recursively_load_weights(_lowerCAmelCase , _lowerCAmelCase )
hf_wavlm.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
__lowerCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
__lowerCamelCase : int = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
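# A minimal usage sketch, assuming the decorated conversion function above is
# exposed as `convert_wavlm_checkpoint` and the script is saved as
# convert_wavlm.py (both names are illustrative assumptions):
#
#   python convert_wavlm.py \
#       --checkpoint_path /path/to/WavLM-Base.pt \
#       --pytorch_dump_folder_path ./wavlm-base-converted \
#       --config_path ./config.json  # optional; omitted -> a default WavLMConfig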
| 52 |
from math import sqrt
def A_ ( _lowerCAmelCase ) -> bool:
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (
number >= 0
), "'number' must been an int and positive"
UpperCamelCase : List[Any] = True
# 0 and 1 are not prime.
if number <= 1:
UpperCamelCase : List[Any] = False
for divisor in range(2 , int(round(sqrt(_lowerCAmelCase ) ) ) + 1 ):
# if 'number' is divisible by 'divisor', set 'status'
# to False and break out of the loop.
if number % divisor == 0:
UpperCamelCase : Union[str, Any] = False
break
# precondition
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ), "'status' must been from type bool"
return status
def A_ ( _lowerCAmelCase ) -> Any:
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
UpperCamelCase : int = list(range(2 , n + 1 ) )
UpperCamelCase : Optional[int] = [] # this list will be returned.
# actual sieve of Eratosthenes
for i in range(len(_lowerCAmelCase ) ):
for j in range(i + 1 , len(_lowerCAmelCase ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
UpperCamelCase : Tuple = 0
# filters actual prime numbers.
UpperCamelCase : str = [x for x in begin_list if x != 0]
# precondition
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ), "'ans' must been from type list"
return ans
def A_ ( _lowerCAmelCase ) -> Optional[Any]:
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (n > 2), "'N' must been an int and > 2"
UpperCamelCase : str = []
# iterates over all numbers from 2 up to N;
# if a number is prime, appends it to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(_lowerCAmelCase ):
ans.append(_lowerCAmelCase )
# precondition
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ), "'ans' must been from type list"
return ans
def A_ ( _lowerCAmelCase ) -> Any:
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and number >= 0, "'number' must been an int and >= 0"
UpperCamelCase : Optional[Any] = [] # this list will be returns of the function.
# potential prime number factors.
UpperCamelCase : Tuple = 2
UpperCamelCase : str = number
if number == 0 or number == 1:
ans.append(_lowerCAmelCase )
# if 'number' is not prime, builds the prime factorization of 'number'
elif not is_prime(_lowerCAmelCase ):
while quotient != 1:
if is_prime(_lowerCAmelCase ) and (quotient % factor == 0):
ans.append(_lowerCAmelCase )
quotient /= factor
else:
factor += 1
else:
ans.append(_lowerCAmelCase )
# precondition
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ), "'ans' must been from type list"
return ans
def A_ ( _lowerCAmelCase ) -> Any:
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (
number >= 0
), "'number' bust been an int and >= 0"
UpperCamelCase : List[Any] = 0
# prime factorization of 'number'
UpperCamelCase : Any = prime_factorization(_lowerCAmelCase )
UpperCamelCase : List[Any] = max(_lowerCAmelCase )
# precondition
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ), "'ans' must been from type int"
return ans
def A_ ( _lowerCAmelCase ) -> Union[str, Any]:
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (
number >= 0
), "'number' bust been an int and >= 0"
UpperCamelCase : List[Any] = 0
# prime factorization of 'number'
UpperCamelCase : Dict = prime_factorization(_lowerCAmelCase )
UpperCamelCase : List[Any] = min(_lowerCAmelCase )
# precondition
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ), "'ans' must been from type int"
return ans
def A_ ( _lowerCAmelCase ) -> Optional[Any]:
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ), "'number' must been an int"
assert isinstance(number % 2 == 0 , _lowerCAmelCase ), "compare bust been from type bool"
return number % 2 == 0
def A_ ( _lowerCAmelCase ) -> List[Any]:
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ), "'number' must been an int"
assert isinstance(number % 2 != 0 , _lowerCAmelCase ), "compare bust been from type bool"
return number % 2 != 0
def A_ ( _lowerCAmelCase ) -> Any:
assert (
isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (number > 2) and is_even(_lowerCAmelCase )
), "'number' must been an int, even and > 2"
UpperCamelCase : List[str] = [] # this list will returned
# creates a list of prime numbers from 2 up to 'number'
UpperCamelCase : Dict = get_prime_numbers(_lowerCAmelCase )
UpperCamelCase : Tuple = len(_lowerCAmelCase )
# loop counter for the while-loops.
UpperCamelCase : Optional[int] = 0
UpperCamelCase : int = None
# exit flag to break out of the loops
UpperCamelCase : Union[str, Any] = True
while i < len_pn and loop:
UpperCamelCase : Tuple = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
UpperCamelCase : Any = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(_lowerCAmelCase , _lowerCAmelCase )
and (len(_lowerCAmelCase ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
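# Illustrative result for the Goldbach helper above: for an even number such as
# 28 it returns the first prime pair found scanning upward, here [5, 23].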
def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> str:
assert (
isinstance(_lowerCAmelCase , _lowerCAmelCase )
and isinstance(_lowerCAmelCase , _lowerCAmelCase )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
UpperCamelCase : Tuple = 0
while numbera != 0:
UpperCamelCase : Tuple = numbera % numbera
UpperCamelCase : Any = numbera
UpperCamelCase : Union[str, Any] = rest
# precondition
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> int:
assert (
isinstance(_lowerCAmelCase , _lowerCAmelCase )
and isinstance(_lowerCAmelCase , _lowerCAmelCase )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
UpperCamelCase : Optional[int] = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
UpperCamelCase : List[Any] = prime_factorization(_lowerCAmelCase )
UpperCamelCase : Union[str, Any] = prime_factorization(_lowerCAmelCase )
elif numbera == 1 or numbera == 1:
UpperCamelCase : Optional[Any] = []
UpperCamelCase : int = []
UpperCamelCase : List[Any] = max(_lowerCAmelCase , _lowerCAmelCase )
UpperCamelCase : Optional[int] = 0
UpperCamelCase : Tuple = 0
UpperCamelCase : List[str] = [] # numbers captured in both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
UpperCamelCase : str = prime_fac_a.count(_lowerCAmelCase )
UpperCamelCase : Tuple = prime_fac_a.count(_lowerCAmelCase )
for _ in range(max(_lowerCAmelCase , _lowerCAmelCase ) ):
ans *= n
else:
UpperCamelCase : str = prime_fac_a.count(_lowerCAmelCase )
for _ in range(_lowerCAmelCase ):
ans *= n
done.append(_lowerCAmelCase )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
UpperCamelCase : Any = prime_fac_a.count(_lowerCAmelCase )
for _ in range(_lowerCAmelCase ):
ans *= n
done.append(_lowerCAmelCase )
# precondition
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def A_ ( _lowerCAmelCase ) -> Tuple:
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (n >= 0), "'number' must been a positive int"
UpperCamelCase : int = 0
UpperCamelCase : int = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans is not prime,
# advances to the next prime number.
while not is_prime(_lowerCAmelCase ):
ans += 1
# precondition
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and is_prime(
_lowerCAmelCase ), "'ans' must been a prime number and from type int"
return ans
def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> int:
assert (
is_prime(_lowerCAmelCase ) and is_prime(_lowerCAmelCase ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
UpperCamelCase : str = p_number_a + 1 # jump to the next number
UpperCamelCase : Dict = [] # this list will be returned.
# if number is not prime then
# fetch the next prime number.
while not is_prime(_lowerCAmelCase ):
number += 1
while number < p_number_a:
ans.append(_lowerCAmelCase )
number += 1
# fetch the next prime number.
while not is_prime(_lowerCAmelCase ):
number += 1
# precondition
assert (
isinstance(_lowerCAmelCase , _lowerCAmelCase )
and ans[0] != p_number_a
and ans[len(_lowerCAmelCase ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def A_ ( _lowerCAmelCase ) -> List[str]:
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (n >= 1), "'n' must been int and >= 1"
UpperCamelCase : Dict = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(_lowerCAmelCase )
# precondition
assert ans[0] == 1 and ans[len(_lowerCAmelCase ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def A_ ( _lowerCAmelCase ) -> int:
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (
number > 1
), "'number' must been an int and >= 1"
UpperCamelCase : int = get_divisors(_lowerCAmelCase )
# precondition
assert (
isinstance(_lowerCAmelCase , _lowerCAmelCase )
and (divisors[0] == 1)
and (divisors[len(_lowerCAmelCase ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# sums all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
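# Quick sanity check (illustrative): 6 = 1 + 2 + 3 and 28 = 1 + 2 + 4 + 7 + 14
# are perfect numbers, so the function above should return True for both.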
def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
assert (
isinstance(_lowerCAmelCase , _lowerCAmelCase )
and isinstance(_lowerCAmelCase , _lowerCAmelCase )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# computes the greatest common divisor of numerator and denominator.
UpperCamelCase : List[str] = gcd(abs(_lowerCAmelCase ) , abs(_lowerCAmelCase ) )
# precondition
assert (
isinstance(_lowerCAmelCase , _lowerCAmelCase )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def A_ ( _lowerCAmelCase ) -> Dict:
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (n >= 0), "'n' must been a int and >= 0"
UpperCamelCase : str = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def A_ ( _lowerCAmelCase ) -> Tuple:
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (n >= 0), "'n' must been an int and >= 0"
UpperCamelCase : Dict = 0
UpperCamelCase : Dict = 1
UpperCamelCase : Union[str, Any] = 1 # this will be returned
for _ in range(n - 1 ):
UpperCamelCase : Any = ans
ans += fiba
UpperCamelCase : str = tmp
return ans
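# A minimal usage sketch for the module above, assuming the original helper
# names (is_prime, prime_factorization, gcd, kg_v, fib are reconstructions,
# not guaranteed, since every def here is bound to the same name `A_`):
#
#   assert is_prime(97) is True
#   assert prime_factorization(360) == [2, 2, 2, 3, 3, 5]
#   assert gcd(24, 36) == 12
#   assert kg_v(8, 10) == 40  # least common multiple
#   assert fib(10) == 55      # 10th Fibonacci number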
| 52 | 1 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
lowerCAmelCase_ = 2_00
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
lowerCAmelCase_ = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
lowerCAmelCase_ = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 10_00))
def snake_case( __magic_name__ , __magic_name__ ) -> tuple[str, float]:
'''simple docstring'''
lowercase : Tuple = len([g for position, g in enumerate(__magic_name__ ) if g == main_target[position]] )
return (item, float(__magic_name__ ))
def snake_case( __magic_name__ , __magic_name__ ) -> tuple[str, str]:
'''simple docstring'''
lowercase : Tuple = random.randint(0 , len(__magic_name__ ) - 1 )
lowercase : List[Any] = parent_a[:random_slice] + parent_a[random_slice:]
lowercase : int = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def snake_case( __magic_name__ , __magic_name__ ) -> str:
'''simple docstring'''
lowercase : Union[str, Any] = list(__magic_name__ )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
lowercase : Dict = random.choice(__magic_name__ )
return "".join(__magic_name__ )
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , ) -> list[str]:
'''simple docstring'''
lowercase : Optional[Any] = []
# Generate more children proportionally to the fitness score.
lowercase : Optional[int] = int(parent_a[1] * 1_00 ) + 1
lowercase : int = 10 if child_n >= 10 else child_n
for _ in range(__magic_name__ ):
lowercase : str = population_score[random.randint(0 , __magic_name__ )][0]
lowercase , lowercase : Optional[Any] = crossover(parent_a[0] , __magic_name__ )
# Append new string to the population list.
pop.append(mutate(__magic_name__ , __magic_name__ ) )
pop.append(mutate(__magic_name__ , __magic_name__ ) )
return pop
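# A quick illustration of the operators above, assuming the de-obfuscated names
# `crossover` and `mutate` (reconstructions, not guaranteed):
#
#   child_1, child_2 = crossover("banana", "cherry")
#   # e.g. ("bherry", "canana") when the random slice index is 1
#   mutated = mutate(child_1, genes_list)  # at most one gene changes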
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ = True ) -> tuple[int, int, str]:
'''simple docstring'''
if N_POPULATION < N_SELECTED:
lowercase : str = F"""{N_POPULATION} must be bigger than {N_SELECTED}"""
raise ValueError(__magic_name__ )
# Verify that the target contains no genes besides the ones inside genes variable.
lowercase : List[str] = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
lowercase : List[Any] = F"""{not_in_genes_list} is not in genes list, evolution cannot converge"""
raise ValueError(__magic_name__ )
# Generate random starting population.
lowercase : str = []
for _ in range(__magic_name__ ):
population.append(''''''.join([random.choice(__magic_name__ ) for i in range(len(__magic_name__ ) )] ) )
# Just some logs to know what the algorithm is doing.
lowercase , lowercase : Optional[int] = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(__magic_name__ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
lowercase : Tuple = [evaluate(__magic_name__ , __magic_name__ ) for item in population]
# Check if there is a matching evolution.
lowercase : Tuple = sorted(__magic_name__ , key=lambda __magic_name__ : x[1] , reverse=__magic_name__ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F"""\nGeneration: {generation}"""
F"""\nTotal Population:{total_population}"""
F"""\nBest score: {population_score[0][1]}"""
F"""\nBest string: {population_score[0][0]}""" )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoids regression of evolution.
lowercase : Dict = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(__magic_name__ )
# Normalize population score to be between 0 and 1.
lowercase : Optional[int] = [
(item, score / len(__magic_name__ )) for item, score in population_score
]
# This is the selection step.
for i in range(__magic_name__ ):
population.extend(select(population_score[int(__magic_name__ )] , __magic_name__ , __magic_name__ ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# far fewer generations.
if len(__magic_name__ ) > N_POPULATION:
break
if __name__ == "__main__":
lowerCAmelCase_ = (
'This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'
)
lowerCAmelCase_ = list(
' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'
'nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'
)
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = basic(target_str, genes_list)
print(
f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
| 116 |
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
lowerCAmelCase_ = 'bert-base-cased'
lowerCAmelCase_ = 'google/pegasus-xsum'
lowerCAmelCase_ = [' Sam ate lunch today.', 'Sams lunch ingredients.']
lowerCAmelCase_ = ['A very interesting story about what I ate for lunch.', 'Avocado, celery, turkey, coffee']
lowerCAmelCase_ = 'patrickvonplaten/t5-tiny-random'
lowerCAmelCase_ = 'sshleifer/bart-tiny-random'
lowerCAmelCase_ = 'sshleifer/tiny-mbart'
lowerCAmelCase_ = 'sshleifer/tiny-marian-en-de'
def snake_case( __magic_name__ , __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
lowercase : List[str] = '''\n'''.join(__magic_name__ )
Path(__magic_name__ ).open('''w''' ).writelines(__magic_name__ )
def snake_case( __magic_name__ ) -> Optional[int]:
'''simple docstring'''
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(__magic_name__ , F"""{split}.source""" ) , __magic_name__ )
_dump_articles(os.path.join(__magic_name__ , F"""{split}.target""" ) , __magic_name__ )
return tmp_dir
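# Illustration of the helper above (assuming its original name is
# make_test_data_dir): it writes ARTICLES to {split}.source and SUMMARIES to
# {split}.target for each of train/val/test, then returns the directory.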
class _A ( _lowerCamelCase ):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def __a ( self : List[str] , _A : Optional[Any] ) -> Dict:
"""simple docstring"""
lowercase : int = AutoTokenizer.from_pretrained(_A )
lowercase : Optional[int] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
lowercase : List[str] = max(len(tokenizer.encode(_A ) ) for a in ARTICLES )
lowercase : Optional[int] = max(len(tokenizer.encode(_A ) ) for a in SUMMARIES )
lowercase : str = 4
lowercase : List[Any] = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
lowercase , lowercase : Optional[int] = '''ro_RO''', '''de_DE''' # ignored for all but mbart, but never causes error.
lowercase : int = SeqaSeqDataset(
_A , data_dir=_A , type_path='''train''' , max_source_length=_A , max_target_length=_A , src_lang=_A , tgt_lang=_A , )
lowercase : Optional[int] = DataLoader(_A , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(_A , _A )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
lowercase : int = shift_tokens_right(batch['''labels'''] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def __a ( self : int , _A : Tuple ) -> List[str]:
"""simple docstring"""
lowercase : int = AutoTokenizer.from_pretrained(_A )
lowercase : int = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
lowercase : Optional[int] = max(len(tokenizer.encode(_A ) ) for a in ARTICLES )
lowercase : List[Any] = max(len(tokenizer.encode(_A ) ) for a in SUMMARIES )
lowercase : List[Any] = 4
lowercase : Any = LegacySeqaSeqDataset(
_A , data_dir=_A , type_path='''train''' , max_source_length=20 , max_target_length=_A , )
lowercase : Optional[Any] = DataLoader(_A , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def __a ( self : List[str] ) -> int:
"""simple docstring"""
lowercase : Tuple = AutoTokenizer.from_pretrained('''facebook/mbart-large-cc25''' )
lowercase : List[str] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
lowercase : Optional[int] = tmp_dir.joinpath('''train.source''' ).open().readlines()
lowercase : List[Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(_A , _A , 128 , _A )
lowercase : Dict = {x.name for x in tmp_dir.iterdir()}
lowercase : Optional[Any] = {x.name for x in save_dir.iterdir()}
lowercase : int = save_dir.joinpath('''train.source''' ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(_A ) < len(_A )
assert len(_A ) == 1
assert len(packed_examples[0] ) == sum(len(_A ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='''This test requires fairseq''' )
def __a ( self : Any ) -> Dict:
"""simple docstring"""
if not FAIRSEQ_AVAILABLE:
return
lowercase , lowercase , lowercase : Any = self._get_dataset(max_len=64 )
lowercase : List[Any] = 64
lowercase : Any = ds.make_dynamic_sampler(_A , required_batch_size_multiple=_A )
lowercase : Tuple = [len(_A ) for x in batch_sampler]
assert len(set(_A ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(_A ) == len(_A ) # no dropped or added examples
lowercase : str = DataLoader(_A , batch_sampler=_A , collate_fn=ds.collate_fn , num_workers=2 )
lowercase : Optional[int] = []
lowercase : str = []
for batch in data_loader:
lowercase : Tuple = batch['''input_ids'''].shape
lowercase : List[Any] = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
lowercase : Dict = np.prod(batch['''input_ids'''].shape )
num_src_per_batch.append(_A )
if num_src_tokens > (max_tokens * 1.1):
failures.append(_A )
assert num_src_per_batch[0] == max(_A )
if failures:
raise AssertionError(f"""too many tokens in {len(_A )} batches""" )
def __a ( self : int ) -> Any:
"""simple docstring"""
lowercase , lowercase , lowercase : Tuple = self._get_dataset(max_len=512 )
lowercase : Tuple = 2
lowercase : Union[str, Any] = ds.make_sortish_sampler(_A , shuffle=_A )
lowercase : List[Any] = DataLoader(_A , batch_size=_A , collate_fn=ds.collate_fn , num_workers=2 )
lowercase : List[Any] = DataLoader(_A , batch_size=_A , collate_fn=ds.collate_fn , num_workers=2 , sampler=_A )
lowercase : int = tokenizer.pad_token_id
def count_pad_tokens(_A : List[Any] , _A : Union[str, Any]="input_ids" ):
return [batch[k].eq(_A ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(_A , k='''labels''' ) ) < sum(count_pad_tokens(_A , k='''labels''' ) )
assert sum(count_pad_tokens(_A ) ) < sum(count_pad_tokens(_A ) )
assert len(_A ) == len(_A )
def __a ( self : Any , _A : Union[str, Any]=1_000 , _A : str=128 ) -> List[Any]:
"""simple docstring"""
if os.getenv('''USE_REAL_DATA''' , _A ):
lowercase : Optional[Any] = '''examples/seq2seq/wmt_en_ro'''
lowercase : Optional[int] = max_len * 2 * 64
if not Path(_A ).joinpath('''train.len''' ).exists():
save_len_file(_A , _A )
else:
lowercase : Tuple = '''examples/seq2seq/test_data/wmt_en_ro'''
lowercase : Optional[Any] = max_len * 4
save_len_file(_A , _A )
lowercase : Optional[Any] = AutoTokenizer.from_pretrained(_A )
lowercase : Union[str, Any] = SeqaSeqDataset(
_A , data_dir=_A , type_path='''train''' , max_source_length=_A , max_target_length=_A , n_obs=_A , )
return ds, max_tokens, tokenizer
def __a ( self : List[str] ) -> List[str]:
"""simple docstring"""
lowercase , lowercase , lowercase : Union[str, Any] = self._get_dataset()
lowercase : int = set(DistributedSortishSampler(_A , 256 , num_replicas=2 , rank=0 , add_extra_examples=_A ) )
lowercase : Dict = set(DistributedSortishSampler(_A , 256 , num_replicas=2 , rank=1 , add_extra_examples=_A ) )
assert idsa.intersection(_A ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def __a ( self : Union[str, Any] , _A : Tuple ) -> Optional[int]:
"""simple docstring"""
lowercase : Union[str, Any] = AutoTokenizer.from_pretrained(_A , use_fast=_A )
if tok_name == MBART_TINY:
lowercase : Tuple = SeqaSeqDataset(
_A , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='''train''' , max_source_length=4 , max_target_length=8 , src_lang='''EN''' , tgt_lang='''FR''' , )
lowercase : Union[str, Any] = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
lowercase : List[Any] = SeqaSeqDataset(
_A , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='''train''' , max_source_length=4 , max_target_length=8 , )
lowercase : Dict = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(_A ) == 1 if tok_name == BART_TINY else len(_A ) == 0
| 116 | 1 |
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
snake_case__ : Tuple = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class snake_case_( a__ , unittest.TestCase ):
__UpperCamelCase = XGLMTokenizer
__UpperCamelCase = XGLMTokenizerFast
__UpperCamelCase = True
__UpperCamelCase = True
def lowerCamelCase__ ( self : Any ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase : List[str] = XGLMTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase__ ( self : Any ):
lowerCAmelCase : Optional[int] = '''<pad>'''
lowerCAmelCase : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase_ ) , UpperCamelCase_ )
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(len(UpperCamelCase_ ) , 1_0_0_8 )
def lowerCamelCase__ ( self : Tuple ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_8 )
def lowerCamelCase__ ( self : Any ):
lowerCAmelCase : List[str] = XGLMTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_ )
lowerCAmelCase : int = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(UpperCamelCase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
lowerCAmelCase : str = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
UpperCamelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
lowerCAmelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(UpperCamelCase_ )
self.assertListEqual(
UpperCamelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
lowerCAmelCase : Tuple = tokenizer.convert_ids_to_tokens(UpperCamelCase_ )
self.assertListEqual(
UpperCamelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def lowerCamelCase__ ( self : Optional[int] ):
return XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
def lowerCamelCase__ ( self : str ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(UpperCamelCase_ , f.name )
lowerCAmelCase : Any = XGLMTokenizer(f.name , keep_accents=UpperCamelCase_ )
lowerCAmelCase : Any = pickle.dumps(UpperCamelCase_ )
pickle.loads(UpperCamelCase_ )
def lowerCamelCase__ ( self : Dict ):
if not self.test_rust_tokenizer:
return
lowerCAmelCase : Optional[int] = self.get_tokenizer()
lowerCAmelCase : int = self.get_rust_tokenizer()
lowerCAmelCase : Optional[Any] = '''I was born in 92000, and this is falsé.'''
lowerCAmelCase : int = tokenizer.tokenize(UpperCamelCase_ )
lowerCAmelCase : str = rust_tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
lowerCAmelCase : List[Any] = rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : List[Any] = self.get_rust_tokenizer()
lowerCAmelCase : List[Any] = tokenizer.encode(UpperCamelCase_ )
lowerCAmelCase : List[str] = rust_tokenizer.encode(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
@slow
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : List[str] = '''Hello World!'''
lowerCAmelCase : List[str] = [2, 3_1_2_2_7, 4_4_4_7, 3_5]
self.assertListEqual(UpperCamelCase_ , self.big_tokenizer.encode(UpperCamelCase_ ) )
@slow
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : List[Any] = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'''
)
# fmt: off
lowerCAmelCase : Tuple = [2, 1_0_1_8, 6_7, 1_1, 1_9_8_8, 2_6_1_7, 5_6_3_1, 2_7_8, 1_1, 3_4_0_7, 4_8, 7_1_6_3_0, 2_8_0_8_5, 4, 3_2_3_4, 1_5_7, 1_3, 6, 5, 6, 4, 3_5_2_6, 7_6_8, 1_5, 6_5_9, 5_7, 2_9_8, 3_9_8_3, 8_6_4, 1_2_9, 2_1, 6, 5, 1_3_6_7_5, 3_7_7, 6_5_2, 7_5_8_0, 1_0_3_4_1, 1_5_5, 2_8_1_7, 4_2_2, 1_6_6_6, 7, 1_6_7_4, 5_3, 1_1_3, 2_0_2_2_7_7, 1_7_8_9_2, 3_3, 6_0, 8_7, 4, 3_2_3_4, 1_5_7, 6_1, 2_6_6_7, 5_2_3_7_6, 1_9, 8_8, 2_3, 7_3_5]
# fmt: on
self.assertListEqual(UpperCamelCase_ , self.big_tokenizer.encode(UpperCamelCase_ ) )
@slow
def lowerCamelCase__ ( self : Any ):
# fmt: off
lowerCAmelCase : Optional[int] = {
'''input_ids''': [[2, 1_0_8_8_2_5, 1_1_6_3, 1_5, 8_8_0_1_0, 4_7_3, 1_5_8_9_8, 1_5_7, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 2_3_8_0_2_1, 1_1_6_3, 5_3, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 5_3_2_8_3, 1_8_2_3_9_6, 8, 1_8_5_6_6, 1_6, 3_6_7_3_3, 4_1_0_1, 8, 2_3_0, 2_4_4_0_1_7, 1_2_2_5_5_3, 7, 1_5, 1_3_2_5_9_7, 4, 2_9_3, 1_2_5_1_1, 7_6_1_0, 4, 3_4_1_4, 1_3_2_5_9_7, 9, 4, 3_2_3_6_1, 3_6_2, 4, 7_3_4, 2_8_5_1_2, 3_2_5_6_9, 1_8, 4, 3_2_3_6_1, 2_6_0_9_6, 1_4_9_8_2, 7_3, 1_8_7_1_5, 2_1_4_3_3, 2_3_5_2_6_1, 1_5, 4_9_2, 1_2_4_2_7, 1_6, 5_3, 1_8_7_1_5, 2_1_4_3_3, 6_5_4_5_4, 1_5, 2_3_6_5_9, 5_6_3, 1_6, 2_7_8, 5_9_7, 2_8_4_3, 5_9_5, 7_9_3_1, 1_8_2_3_9_6, 6_4_1_8_6, 2_2, 8_8_6, 5_9_5, 1_3_2_9_8_1, 5_3, 2_5_5_4_0, 3_4_4_9, 4_3_9_8_2, 3_9_9_0_1, 5_9_5_1, 8_7_8, 3_3_0, 4, 2_7_6_9_4, 8_0_2_6_9, 3_1_2, 5_3, 6_5_1_7, 1_1_7_8_0, 6_1_1, 2_0_4_0_8, 5], [2, 6, 1_3_2_5_9_7, 6_7, 4_2_8_9_7, 3_3, 5_9_2, 8, 1_6_3_7_2_9, 2_5_5_4_0, 3_6_1, 1_3_6_9_9_7, 1_0_9_5_1_4, 1_7_3_2_3_0, 7, 5_0_1, 6_0, 1_0_2_9_1_3, 1_9_6, 5_6_3_1, 2_3_5, 6_3_2_4_3, 4_7_3, 6, 2_3_1_7_5_7, 7_4, 5_2_7_7, 7_9_0_5, 5_3, 3_0_9_5, 3_7_3_1_7, 2_2, 4_5_4, 1_8_3_8_7_4, 5], [2, 2_6_8, 3_1_2_9_8, 4_6_5_3_0, 6, 1_3_2_9_3_5, 4_3_8_3_1, 7, 5_9_7, 3_2, 2_4, 3_6_8_8, 9_8_6_5, 5]],
'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase_ , model_name='''facebook/xglm-564M''' , padding=UpperCamelCase_ , )
| 60 |
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
snake_case__ : List[Any] = '''platform'''
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class snake_case_:
__UpperCamelCase = PegasusConfig
__UpperCamelCase = {}
__UpperCamelCase = '''gelu'''
def __init__( self : List[Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Any=1_3 , UpperCamelCase_ : List[Any]=7 , UpperCamelCase_ : Tuple=True , UpperCamelCase_ : List[Any]=False , UpperCamelCase_ : Optional[Any]=9_9 , UpperCamelCase_ : Any=3_2 , UpperCamelCase_ : List[Any]=5 , UpperCamelCase_ : str=4 , UpperCamelCase_ : str=3_7 , UpperCamelCase_ : Dict=0.1 , UpperCamelCase_ : Dict=0.1 , UpperCamelCase_ : Any=2_0 , UpperCamelCase_ : Dict=2 , UpperCamelCase_ : List[str]=1 , UpperCamelCase_ : Any=0 , ):
lowerCAmelCase : List[Any] = parent
lowerCAmelCase : Optional[int] = batch_size
lowerCAmelCase : Any = seq_length
lowerCAmelCase : Dict = is_training
lowerCAmelCase : Optional[int] = use_labels
lowerCAmelCase : Union[str, Any] = vocab_size
lowerCAmelCase : Tuple = hidden_size
lowerCAmelCase : Any = num_hidden_layers
lowerCAmelCase : List[str] = num_attention_heads
lowerCAmelCase : Optional[Any] = intermediate_size
lowerCAmelCase : Optional[int] = hidden_dropout_prob
lowerCAmelCase : List[Any] = attention_probs_dropout_prob
lowerCAmelCase : str = max_position_embeddings
lowerCAmelCase : str = eos_token_id
lowerCAmelCase : List[Any] = pad_token_id
lowerCAmelCase : List[str] = bos_token_id
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
lowerCAmelCase : Union[str, Any] = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
lowerCAmelCase : List[str] = np.concatenate([input_ids, eos_tensor] , axis=1 )
lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase : Optional[Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
lowerCAmelCase : Dict = prepare_pegasus_inputs_dict(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return config, inputs_dict
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Dict ):
lowerCAmelCase : Any = 2_0
lowerCAmelCase : Any = model_class_name(UpperCamelCase_ )
lowerCAmelCase : List[str] = model.encode(inputs_dict['''input_ids'''] )
lowerCAmelCase, lowerCAmelCase : Optional[Any] = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
lowerCAmelCase : Any = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
lowerCAmelCase : Dict = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCAmelCase : Optional[int] = model.decode(
decoder_input_ids[:, :-1] , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , decoder_position_ids=UpperCamelCase_ , )
lowerCAmelCase : int = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
lowerCAmelCase : int = model.decode(
decoder_input_ids[:, -1:] , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCamelCase_ , )
lowerCAmelCase : List[Any] = model.decode(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Any , UpperCamelCase_ : Dict ):
lowerCAmelCase : Dict = 2_0
lowerCAmelCase : Union[str, Any] = model_class_name(UpperCamelCase_ )
lowerCAmelCase : Any = model.encode(inputs_dict['''input_ids'''] )
lowerCAmelCase, lowerCAmelCase : str = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
lowerCAmelCase : Any = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
lowerCAmelCase : Optional[int] = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : int = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCAmelCase : List[str] = model.decode(
decoder_input_ids[:, :-1] , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , decoder_position_ids=UpperCamelCase_ , )
lowerCAmelCase : Tuple = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
lowerCAmelCase : Optional[int] = model.decode(
decoder_input_ids[:, -1:] , UpperCamelCase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCamelCase_ , decoder_position_ids=UpperCamelCase_ , )
lowerCAmelCase : List[Any] = model.decode(UpperCamelCase_ , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ )
lowerCAmelCase : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
def _snake_case ( _snake_case : Tuple , _snake_case : Dict , _snake_case : Dict , _snake_case : Optional[Any]=None , _snake_case : Dict=None , ):
if attention_mask is None:
lowerCAmelCase : Tuple = np.not_equal(_snake_case , config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
lowerCAmelCase : Dict = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class snake_case_( a__ , unittest.TestCase ):
__UpperCamelCase = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
__UpperCamelCase = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
__UpperCamelCase = True
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase : Optional[Any] = FlaxPegasusModelTester(self )
lowerCAmelCase : Tuple = ConfigTester(self , config_class=UpperCamelCase_ )
def lowerCamelCase__ ( self : str ):
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase, lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : Any ):
lowerCAmelCase, lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase, lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase : str = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Tuple = model_class(UpperCamelCase_ )
@jax.jit
def encode_jitted(UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[int]=None , **UpperCamelCase_ : Tuple ):
return model.encode(input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ )
with self.subTest('''JIT Enabled''' ):
lowerCAmelCase : Tuple = encode_jitted(**UpperCamelCase_ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
lowerCAmelCase : Dict = encode_jitted(**UpperCamelCase_ ).to_tuple()
self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) )
for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase, lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase : Optional[int] = model_class(UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
lowerCAmelCase : Any = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(UpperCamelCase_ : Dict , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] ):
return model.decode(
decoder_input_ids=UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , encoder_outputs=UpperCamelCase_ , )
with self.subTest('''JIT Enabled''' ):
lowerCAmelCase : Optional[Any] = decode_jitted(**UpperCamelCase_ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
lowerCAmelCase : Any = decode_jitted(**UpperCamelCase_ ).to_tuple()
self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) )
for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowerCamelCase__ ( self : str ):
for model_class_name in self.all_model_classes:
lowerCAmelCase : int = model_class_name.from_pretrained('''google/pegasus-large''' , from_pt=UpperCamelCase_ )
lowerCAmelCase : List[Any] = np.ones((1, 1) )
lowerCAmelCase : str = model(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
@slow
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : Any = FlaxPegasusForConditionalGeneration.from_pretrained('''google/pegasus-xsum''' )
lowerCAmelCase : List[Any] = PegasusTokenizer.from_pretrained('''google/pegasus-xsum''' )
lowerCAmelCase : int = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
lowerCAmelCase : str = [
'''California\'s largest electricity provider has turned off power to hundreds of thousands of customers.''',
'''Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.''',
]
lowerCAmelCase : Optional[Any] = tokenizer(UpperCamelCase_ , return_tensors='''np''' , truncation=UpperCamelCase_ , max_length=5_1_2 , padding=UpperCamelCase_ )
lowerCAmelCase : Optional[int] = model.generate(**UpperCamelCase_ , num_beams=2 ).sequences
lowerCAmelCase : Tuple = tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
assert tgt_text == decoded
| 60 | 1 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
UpperCamelCase_ = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
UpperCamelCase_ = {
"allenai/led-base-16384": 16_384,
}
class a_ ( _snake_case ):
UpperCamelCase__ : Optional[int] =VOCAB_FILES_NAMES
UpperCamelCase__ : Tuple =PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ : str =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ : Optional[Any] =LEDTokenizer
UpperCamelCase__ : List[str] =["input_ids", "attention_mask"]
def __init__( self :Union[str, Any] , _lowercase :Dict=None , _lowercase :Optional[Any]=None , _lowercase :List[Any]=None , _lowercase :Dict="replace" , _lowercase :Optional[int]="<s>" , _lowercase :Any="</s>" , _lowercase :Union[str, Any]="</s>" , _lowercase :List[Any]="<s>" , _lowercase :int="<unk>" , _lowercase :List[str]="<pad>" , _lowercase :int="<mask>" , _lowercase :Tuple=False , _lowercase :List[str]=True , **_lowercase :str , ) -> Dict:
super().__init__(
_lowercase , _lowercase , tokenizer_file=_lowercase , errors=_lowercase , bos_token=_lowercase , eos_token=_lowercase , sep_token=_lowercase , cls_token=_lowercase , unk_token=_lowercase , pad_token=_lowercase , mask_token=_lowercase , add_prefix_space=_lowercase , trim_offsets=_lowercase , **_lowercase , )
UpperCAmelCase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('''add_prefix_space''' , _lowercase) != add_prefix_space:
UpperCAmelCase_ = getattr(_lowercase , pre_tok_state.pop('''type'''))
UpperCAmelCase_ = add_prefix_space
UpperCAmelCase_ = pre_tok_class(**_lowercase)
UpperCAmelCase_ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
UpperCAmelCase_ = '''post_processor'''
UpperCAmelCase_ = getattr(self.backend_tokenizer , _lowercase , _lowercase)
if tokenizer_component_instance:
UpperCAmelCase_ = json.loads(tokenizer_component_instance.__getstate__())
# The lists 'sep' and 'cls' must be cast as tuples for the object `post_processor_class`
if "sep" in state:
UpperCAmelCase_ = tuple(state['''sep'''])
if "cls" in state:
UpperCAmelCase_ = tuple(state['''cls'''])
UpperCAmelCase_ = False
if state.get('''add_prefix_space''' , _lowercase) != add_prefix_space:
UpperCAmelCase_ = add_prefix_space
UpperCAmelCase_ = True
if state.get('''trim_offsets''' , _lowercase) != trim_offsets:
UpperCAmelCase_ = trim_offsets
UpperCAmelCase_ = True
if changes_to_apply:
UpperCAmelCase_ = getattr(_lowercase , state.pop('''type'''))
UpperCAmelCase_ = component_class(**_lowercase)
setattr(self.backend_tokenizer , _lowercase , _lowercase)
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def __a ( self :List[Any]) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''')
return None
return str(self._mask_token)
@mask_token.setter
def __a ( self :List[str] , _lowercase :int) -> Tuple:
UpperCAmelCase_ = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase) if isinstance(_lowercase , _lowercase) else value
UpperCAmelCase_ = value
def __a ( self :Union[str, Any] , *_lowercase :Tuple , **_lowercase :str) -> BatchEncoding:
UpperCAmelCase_ = kwargs.get('''is_split_into_words''' , _lowercase)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
'''to use it with pretokenized inputs.''')
return super()._batch_encode_plus(*_lowercase , **_lowercase)
def __a ( self :int , *_lowercase :Any , **_lowercase :List[str]) -> BatchEncoding:
UpperCAmelCase_ = kwargs.get('''is_split_into_words''' , _lowercase)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
'''to use it with pretokenized inputs.''')
return super()._encode_plus(*_lowercase , **_lowercase)
def __a ( self :Any , _lowercase :str , _lowercase :Optional[str] = None) -> Tuple[str]:
UpperCAmelCase_ = self._tokenizer.model.save(_lowercase , name=_lowercase)
return tuple(_lowercase)
def __a ( self :str , _lowercase :Optional[int] , _lowercase :Union[str, Any]=None) -> str:
UpperCAmelCase_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __a ( self :Union[str, Any] , _lowercase :List[int] , _lowercase :Optional[List[int]] = None) -> List[int]:
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def __a ( self :Tuple , _lowercase :Union[Dict[str, EncodedInput], BatchEncoding] , _lowercase :Optional[int] = None , _lowercase :PaddingStrategy = PaddingStrategy.DO_NOT_PAD , _lowercase :Optional[int] = None , _lowercase :Optional[bool] = None , ) -> dict:
UpperCAmelCase_ = super()._pad(
encoded_inputs=_lowercase , max_length=_lowercase , padding_strategy=_lowercase , pad_to_multiple_of=_lowercase , return_attention_mask=_lowercase , )
# Load from model defaults
if return_attention_mask is None:
UpperCAmelCase_ = '''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
UpperCAmelCase_ = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` needs to have the same length as other (sequential) inputs.
UpperCAmelCase_ = len(encoded_inputs['''global_attention_mask''']) != len(_lowercase)
if needs_to_be_padded:
UpperCAmelCase_ = len(_lowercase) - len(encoded_inputs['''global_attention_mask'''])
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
UpperCAmelCase_ = (
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
UpperCAmelCase_ = [-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side))
return encoded_inputs
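# A minimal sketch of padding with a global attention mask, assuming the class
# above is LEDTokenizerFast (usage pattern, not verified against every version):
#
#   tok = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
#   enc = tok(["short text", "a somewhat longer input text"])
#   enc["global_attention_mask"] = [[1] + [0] * (len(ids) - 1) for ids in enc["input_ids"]]
#   batch = tok.pad(enc, padding="longest")
#   # the shorter example's global_attention_mask is right-padded with -1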
| 344 |
import pytest
UpperCamelCase_ = "__dummy_dataset1__"
UpperCamelCase_ = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def dataset_loading_script_name() -> str:
    '''simple docstring'''
    return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def dataset_loading_script_code() -> str:
    '''simple docstring'''
    return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path) -> str:
    '''simple docstring'''
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
| 344 | 1 |
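A hypothetical test built on these fixtures (the assertion target is an assumption that follows from the fixture body above):

import os


def test_dataset_loading_script_dir_is_populated(dataset_loading_script_dir, dataset_loading_script_name):
    # The fixture should have written <tmp>/datasets/__dummy_dataset1__/__dummy_dataset1__.py
    script_path = os.path.join(dataset_loading_script_dir, f"{dataset_loading_script_name}.py")
    assert os.path.isfile(script_path)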
'''simple docstring'''
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    '''simple docstring'''
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict = defaultdict(int)

    # For each character in input strings,
    # increment count in the corresponding
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())
if __name__ == "__main__":
from doctest import testmod
testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()
    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
| 141 |
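For comparison, the same check via collections.Counter, a sketch rather than part of the original module; it trades the single counting pass above for two hash-based tallies:

from collections import Counter


def _normalize(s: str) -> str:
    return s.lower().replace(" ", "")


def check_anagrams_counter(first_str: str, second_str: str) -> bool:
    # Two multisets of characters are equal exactly when the strings are anagrams.
    return Counter(_normalize(first_str)) == Counter(_normalize(second_str))


assert check_anagrams_counter("Silent", "Listen")
assert not check_anagrams_counter("apple", "papel!")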
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
UpperCAmelCase = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    '''simple docstring'''
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")
class VideoMAEImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """simple docstring"""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """simple docstring"""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """simple docstring"""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """simple docstring"""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ):
        """simple docstring"""
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image
    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        videos = make_batched(videos)
        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 141 | 1 |
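A quick sketch of how `make_batched` normalizes its input shapes, assuming the module above is importable; the frame size and clip length are illustrative:

import numpy as np

frame = np.zeros((224, 224, 3), dtype=np.uint8)
single_video = [frame] * 8

print(len(make_batched(frame)))                          # single image -> [[frame]], 1 video
print(len(make_batched(single_video)))                   # list of frames -> [video], 1 video
print(len(make_batched([single_video, single_video])))   # already batched -> 2 videos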
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
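A worked check on the classic textbook instance (the numbers are illustrative, not from the original file): ratios are 6, 5 and 4, so the greedy takes items 0 and 1 whole plus 2/3 of item 2, for a total value of 240.

max_value, fractions = fractional_knapsack(value=[60, 100, 120], weight=[10, 20, 30], capacity=50)
assert max_value == 240
assert fractions == [1, 1, 2 / 3]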
| 342 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
| 342 | 1 |
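A minimal instantiation sketch, assuming this class ships as transformers' RoCBertConfig (which the "roc_bert" model_type suggests):

config = RoCBertConfig(vocab_size=30522, hidden_size=768)
print(config.model_type)                # "roc_bert"
print(config.pronunciation_vocab_size)  # 910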
"""simple docstring"""
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup
def UpperCAmelCase__ (lowerCAmelCase_ = "" ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
__SCREAMING_SNAKE_CASE = BeautifulSoup(requests.get(lowerCAmelCase_ ).text , "html.parser" )
__SCREAMING_SNAKE_CASE = soup.find_all("td" , attrs="titleColumn" )
__SCREAMING_SNAKE_CASE = soup.find_all("td" , class_="ratingColumn imdbRating" )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(lowerCAmelCase_ , lowerCAmelCase_ )
}
def UpperCAmelCase__ (lowerCAmelCase_ = "IMDb_Top_250_Movies.csv" ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = get_imdb_top_aaa_movies()
with open(lowerCAmelCase_ , "w" , newline="" ) as out_file:
__SCREAMING_SNAKE_CASE = csv.writer(lowerCAmelCase_ )
writer.writerow(["Movie title", "IMDb rating"] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
| 54 |
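A follow-up sketch using the module's own default filename; note it performs a live scrape before reading the rows back (csv is already imported at the top of the module above):

write_movies()  # live network call to IMDb
with open("IMDb_Top_250_Movies.csv", newline="") as f:
    for row in list(csv.reader(f))[:3]:
        print(row)  # header row, then ["<title>", "<rating>"] pairs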
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
class TextSummarizationTool(PipelineTool):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        '''simple docstring'''
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        '''simple docstring'''
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        '''simple docstring'''
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
| 95 | 0 |
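A hypothetical usage sketch; instantiating the tool downloads the philschmid/bart-large-cnn-samsum checkpoint, so treat it as illustrative rather than CI-ready:

tool = TextSummarizationTool()
print(tool("A long meeting transcript to condense ..."))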
"""simple docstring"""
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--onnx_model_path''',
default=None,
type=str,
required=True,
help='''Path to ONNX model: ''',
)
parser.add_argument(
'''--output_dir''',
default=None,
type=str,
required=True,
help='''The output directory where the model checkpoints and predictions will be written.''',
)
# Other parameters
parser.add_argument(
'''--tokenizer_name''',
default='''''',
type=str,
required=True,
help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
'''--version_2_with_negative''',
action='''store_true''',
help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
'''--null_score_diff_threshold''',
type=float,
default=0.0,
help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
'''--max_seq_length''',
default=384,
type=int,
help=(
'''The maximum total input sequence length after WordPiece tokenization. Sequences '''
'''longer than this will be truncated, and sequences shorter than this will be padded.'''
),
)
parser.add_argument(
'''--doc_stride''',
default=128,
type=int,
help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
'''--n_best_size''',
default=20,
type=int,
help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
'''--max_answer_length''',
default=30,
type=int,
help=(
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
),
)
parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''')
parser.add_argument(
'''--dataset_name''',
type=str,
default=None,
required=True,
help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--dataset_config_name''',
type=str,
default=None,
help='''The configuration name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--preprocessing_num_workers''', type=int, default=4, help='''A csv or a json file containing the training data.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
'''--fp16''',
action='''store_true''',
help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
'''--int8''',
action='''store_true''',
help='''Whether to use INT8''',
)
args = parser.parse_args()

if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        "You are instantiating a new tokenizer from scratch. This is not supported by this script."
        "You can do it from another script, save it, and load it from here, using --tokenizer_name."
    )

logger.info("Training/evaluation parameters %s", args)

args.eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
STRICT_TYPES = True
engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"
# import ONNX file
if not os.path.exists('''temp_engine'''):
os.makedirs('''temp_engine''')
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, '''rb''') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
    # Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]

    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)

        # serialize_engine and store in file (can be directly loaded and deserialized):
        with open(engine_name, "wb") as f:
            f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    """simple docstring"""
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('''Evaluation requires a dataset name''')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets["validation"].column_names

question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    """simple docstring"""
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]

    return tokenized_examples
eval_examples = raw_datasets["validation"]
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc="Running tokenizer on validation dataset",
)

data_collator = default_data_collator
eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    """simple docstring"""
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]
    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        """simple docstring"""
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize

    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
    # Allocate output buffer
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
    # Evaluation
    logger.info("***** Running Evaluation *****")
    logger.info(f"  Num examples = {len(eval_dataset)}")
    logger.info(f"  Batch size = {args.per_device_eval_batch_size}")

    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()

    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1

        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))

    evalTime = timeit.default_timer() - start_time
    logger.info("  Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1000 / niter))
    logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1000))
    logger.info("Total Number of Inference = %d", niter)

    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
    logger.info(f"Evaluation metrics: {eval_metric}")
| 357 |
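A hypothetical invocation of the evaluation script above; the filename is an assumption, but every flag appears in its argparse section:

python evaluate_trt_qa.py \
    --onnx_model_path ./bert_qa.onnx \
    --output_dir ./trt_eval \
    --tokenizer_name bert-base-uncased \
    --dataset_name squad \
    --per_device_eval_batch_size 8 \
    --fp16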
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_beit"] = [
        "BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BeitForImageClassification",
        "BeitForMaskedImageModeling",
        "BeitForSemanticSegmentation",
        "BeitModel",
        "BeitPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_beit"] = [
        "FlaxBeitForImageClassification",
        "FlaxBeitForMaskedImageModeling",
        "FlaxBeitModel",
        "FlaxBeitPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 211 | 0 |
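The pattern above defers heavy submodule imports until an exported name is first accessed. A minimal standalone sketch of the same idea (class and helper names are illustrative, not the transformers implementation):

import importlib
import types


class LazyModule(types.ModuleType):
    """Defers submodule imports until an exported attribute is first accessed."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each exported attribute back to the submodule that defines it.
        self._reverse = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr: str):
        if attr not in self._reverse:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._reverse[attr]}")
        return getattr(submodule, attr)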