| code (string, 82-54.1k chars) | code_codestyle (int64, 0-699) | style_context (string, 111-35.6k chars) | style_context_codestyle (int64, 0-699) | label (int64, 0-1) |
|---|---|---|---|---|
"""simple docstring"""
from __future__ import annotations
def __magic_name__ ( UpperCamelCase : List[str] , UpperCamelCase : Dict ) -> int:
if len(UpperCamelCase ) <= 1 or n <= 1:
return
insert_next(UpperCamelCase , n - 1 )
rec_insertion_sort(UpperCamelCase , n - 1 )
def __magic_name__ ( UpperCamelCase : str , UpperCamelCase : int ) -> Optional[Any]:
if index >= len(UpperCamelCase ) or collection[index - 1] <= collection[index]:
return
# Swaps adjacent elements since they are not in ascending order
a__ = (
collection[index],
collection[index - 1],
)
insert_next(UpperCamelCase , index + 1 )
if __name__ == "__main__":
a : Union[str, Any] = input('Enter integers separated by spaces: ')
a : Optional[Any] = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
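# Illustrative check (an addition, not part of the dataset row): the recursion
# sorts in place, bubbling collection[n - 1] leftward into position each call:
#   demo = [5, 2, 9, 1]
#   rec_insertion_sort(demo, len(demo))
#   demo == [1, 2, 5, 9]  # True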
| 273 |
import os
import unittest

from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        pass
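# Why the expected ids are [7, 4, 5, 10, 8, 9] (an illustrative note, not from
# the dataset row): the toy vocab above is indexed in order, so "un"=7,
# "##want"=4, "##ed"=5, ","=10, "runn"=8, "##ing"=9; WordPiece greedily matches
# the longest known piece and prefixes continuations with "##".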
| 62 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCAmelCase__ = {
"""configuration_biogpt""": ["""BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BioGptConfig"""],
"""tokenization_biogpt""": ["""BioGptTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BioGptForCausalLM""",
"""BioGptForTokenClassification""",
"""BioGptForSequenceClassification""",
"""BioGptModel""",
"""BioGptPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
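# Illustrative note (an addition): with the _LazyModule hook above, a submodule
# is only materialized on first attribute access, e.g.:
#   from transformers import BioGptConfig  # light, does not import torch code
#   from transformers import BioGptModel   # first access pulls in modeling_biogpt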
| 277 |
import argparse
import json
import os
from collections import OrderedDict

import numpy as np
import tensorflow as tf
import torch


def convert_tf_gptsan_to_pt(args):
    """simple docstring"""
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequential with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
    parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
    args = parser.parse_args()
    convert_tf_gptsan_to_pt(args)
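# Hypothetical invocation (an addition; paths are placeholders):
#   python convert_tf_gptsan_to_pt.py --tf_model_dir ./gptsan_tf_checkpoint --output ./gptsan.pt
# The converter walks every TF variable on CPU, renames it into the PyTorch
# layout above, and saves the accumulated OrderedDict with torch.save.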
| 62 | 0 |
from collections import Counter

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split

data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    """simple docstring"""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """simple docstring"""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]


if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
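# Worked sketch of the voting step (an addition; values are illustrative): with
# distances = [(0.1, 2), (0.4, 0), (0.2, 2)] and k=2, sorted(distances)[:2]
# keeps the two nearest labels [2, 2], and Counter([2, 2]).most_common(1)[0][0]
# returns 2, so the point is assigned classes[2].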
| 600 |
import json
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor


@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
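# Minimal usage sketch (an addition; no real checkpoint implied): the processor
# simply fans inputs out to its tokenizer and image processor, e.g.
#   processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
#   batch = processor(text=["lower newer"], images=image_inputs, return_tensors="np")
#   # -> keys: input_ids / token_type_ids / attention_mask / pixel_values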
| 62 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}


class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 462 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        tf_padding=True,
        hidden_act="relu6",
        last_hidden_size=1280,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
def _A ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : List[Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
SCREAMING_SNAKE_CASE : Tuple = self.get_config()
return config, pixel_values, labels, pixel_labels
def _A ( self : Optional[int] ):
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def _A ( self : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple ):
SCREAMING_SNAKE_CASE : int = MobileNetVaModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Any = model(UpperCAmelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def _A ( self : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any ):
SCREAMING_SNAKE_CASE : str = self.num_labels
SCREAMING_SNAKE_CASE : List[Any] = MobileNetVaForImageClassification(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Any = model(UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _A ( self : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any] ):
SCREAMING_SNAKE_CASE : int = self.num_labels
SCREAMING_SNAKE_CASE : Dict = MobileNetVaForSemanticSegmentation(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(UpperCAmelCase_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
SCREAMING_SNAKE_CASE : Any = model(UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def _A ( self : Any ):
SCREAMING_SNAKE_CASE : List[Any] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = config_and_inputs
SCREAMING_SNAKE_CASE : Optional[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
UpperCamelCase_ : Any = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
UpperCamelCase_ : List[Any] = (
{
'''feature-extraction''': MobileNetVaModel,
'''image-classification''': MobileNetVaForImageClassification,
'''image-segmentation''': MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCamelCase_ : Any = False
UpperCamelCase_ : List[str] = False
UpperCamelCase_ : int = False
UpperCamelCase_ : str = False
def _A ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE : List[str] = MobileNetVaModelTester(self )
SCREAMING_SNAKE_CASE : Optional[Any] = MobileNetVaConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ )
def _A ( self : Optional[Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileNetV2 does not use inputs_embeds" )
def _A ( self : List[Any] ):
pass
@unittest.skip(reason="MobileNetV2 does not support input and output embeddings" )
def _A ( self : Dict ):
pass
@unittest.skip(reason="MobileNetV2 does not output attentions" )
def _A ( self : Union[str, Any] ):
pass
def _A ( self : List[Any] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Tuple = model_class(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Dict = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCAmelCase_ )
def _A ( self : Optional[int] ):
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def _A ( self : List[Any] ):
def check_hidden_states_output(UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Union[str, Any] ):
SCREAMING_SNAKE_CASE : Any = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : str = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) )
SCREAMING_SNAKE_CASE : List[Any] = outputs.hidden_states
SCREAMING_SNAKE_CASE : Any = 16
self.assertEqual(len(UpperCAmelCase_ ) , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : str = True
check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : List[Any] = True
check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
def _A ( self : Any ):
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_ )
def _A ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*UpperCAmelCase_ )
@slow
def _A ( self : Optional[Any] ):
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : int = MobileNetVaModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _A ( self : Optional[int] ):
return (
MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224" ) if is_vision_available() else None
)
@slow
def _A ( self : Tuple ):
SCREAMING_SNAKE_CASE : int = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224" ).to(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = self.default_image_processor
SCREAMING_SNAKE_CASE : Optional[int] = prepare_img()
SCREAMING_SNAKE_CASE : Optional[int] = image_processor(images=UpperCAmelCase_ , return_tensors="pt" ).to(UpperCAmelCase_ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : str = model(**UpperCAmelCase_ )
# verify the logits
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((1, 1001) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([0.2_445, -1.1_993, 0.1_905] ).to(UpperCAmelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1E-4 ) )
@slow
def _A ( self : str ):
SCREAMING_SNAKE_CASE : Optional[int] = MobileNetVaForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513" )
SCREAMING_SNAKE_CASE : int = model.to(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = MobileNetVaImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513" )
SCREAMING_SNAKE_CASE : Optional[Any] = prepare_img()
SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor(images=UpperCAmelCase_ , return_tensors="pt" ).to(UpperCAmelCase_ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[Any] = model(**UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE : Dict = torch.Size((1, 21, 65, 65) )
self.assertEqual(logits.shape , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : str = torch.tensor(
[
[[17.5_790, 17.7_581, 18.3_355], [18.3_257, 18.4_230, 18.8_973], [18.6_169, 18.8_650, 19.2_187]],
[[-2.1_595, -2.0_977, -2.3_741], [-2.4_226, -2.3_028, -2.6_835], [-2.7_819, -2.5_991, -2.7_706]],
[[4.2_058, 4.8_317, 4.7_638], [4.4_136, 5.0_361, 4.9_383], [4.5_028, 4.9_644, 4.8_734]],
] , device=UpperCAmelCase_ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCAmelCase_ , atol=1E-4 ) )
| 62 | 0 |
"""simple docstring"""
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

require_version("pytorch_lightning>=1.0.4")

MODEL_MODES = {
    "base": AutoModel,
    "sequence-classification": AutoModelForSequenceClassification,
    "question-answering": AutoModelForQuestionAnswering,
    "pretraining": AutoModelForPreTraining,
    "token-classification": AutoModelForTokenClassification,
    "language-modeling": AutoModelWithLMHead,
    "summarization": AutoModelForSeqaSeqLM,
    "translation": AutoModelForSeqaSeqLM,
}


# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    # "": get_constant_schedule,             # not supported for now
    # "": get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class UpperCAmelCase__ ( pl.LightningModule ):
"""simple docstring"""
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="base" , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ) -> Optional[int]:
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(UpperCAmelCase_ )
a_ : int = 0
a_ : List[str] = Path(self.hparams.output_dir )
a_ : Optional[Any] = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
a_ : Dict = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({"num_labels": num_labels} if num_labels is not None else {}) , cache_dir=UpperCAmelCase_ , **UpperCAmelCase_ , )
else:
a_ : PretrainedConfig = config
a_ : Optional[int] = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
for p in extra_model_params:
if getattr(self.hparams , UpperCAmelCase_ , UpperCAmelCase_ ):
assert hasattr(self.config , UpperCAmelCase_ ), f'''model config doesn\'t have a `{p}` attribute'''
setattr(self.config , UpperCAmelCase_ , getattr(self.hparams , UpperCAmelCase_ ) )
if tokenizer is None:
a_ : Any = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=UpperCAmelCase_ , )
else:
a_ : PreTrainedTokenizer = tokenizer
a_ : Union[str, Any] = MODEL_MODES[mode]
if model is None:
a_ : Union[str, Any] = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool(".ckpt" in self.hparams.model_name_or_path ) , config=self.config , cache_dir=UpperCAmelCase_ , )
else:
a_ : int = model
def A ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Tuple:
a_ : str = self.model_type.from_pretrained(*UpperCAmelCase_ , **UpperCAmelCase_ )
def A ( self ) -> List[str]:
a_ : Any = arg_to_scheduler[self.hparams.lr_scheduler]
a_ : str = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
a_ : str = {"scheduler": scheduler, "interval": "step", "frequency": 1}
return scheduler
def A ( self ) -> int:
a_ : Union[str, Any] = self.model
a_ : Optional[int] = ["bias", "LayerNorm.weight"]
a_ : List[Any] = [
{
"params": [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
], # check this named paramters
"weight_decay": self.hparams.weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
"weight_decay": 0.0,
},
]
if self.hparams.adafactor:
a_ : Optional[int] = Adafactor(
UpperCAmelCase_ , lr=self.hparams.learning_rate , scale_parameter=UpperCAmelCase_ , relative_step=UpperCAmelCase_ )
else:
a_ : Dict = AdamW(
UpperCAmelCase_ , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
a_ : Optional[int] = optimizer
a_ : Any = self.get_lr_scheduler()
return [optimizer], [scheduler]
def A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
return self.validation_step(UpperCAmelCase_ , UpperCAmelCase_ )
def A ( self , _SCREAMING_SNAKE_CASE ) -> int:
return self.validation_end(UpperCAmelCase_ )
def A ( self ) -> Optional[int]:
a_ : Optional[Any] = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
a_ : int = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def A ( self , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
if stage == "test":
a_ : Any = len(self.test_dataloader().dataset )
else:
a_ : Any = self.get_dataloader("train" , self.hparams.train_batch_size , shuffle=UpperCAmelCase_ )
a_ : Optional[Any] = len(self.train_dataloader().dataset )
def A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False ) -> List[Any]:
raise NotImplementedError("You must implement this for your task" )
def A ( self ) -> List[str]:
return self.train_loader
def A ( self ) -> Optional[int]:
return self.get_dataloader("dev" , self.hparams.eval_batch_size , shuffle=UpperCAmelCase_ )
def A ( self ) -> List[str]:
return self.get_dataloader("test" , self.hparams.eval_batch_size , shuffle=UpperCAmelCase_ )
def A ( self , _SCREAMING_SNAKE_CASE ) -> str:
return os.path.join(
self.hparams.data_dir , "cached_{}_{}_{}".format(
UpperCAmelCase_ , list(filter(UpperCAmelCase_ , self.hparams.model_name_or_path.split("/" ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def A ( self , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
a_ : List[Any] = self.output_dir.joinpath("best_tfmr" )
a_ : Optional[int] = self.step_count
self.model.save_pretrained(UpperCAmelCase_ )
self.tokenizer.save_pretrained(UpperCAmelCase_ )
@staticmethod
def A ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
parser.add_argument(
"--model_name_or_path" , default=UpperCAmelCase_ , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--config_name" , default="" , type=UpperCAmelCase_ , help="Pretrained config name or path if not the same as model_name" )
parser.add_argument(
"--tokenizer_name" , default=UpperCAmelCase_ , type=UpperCAmelCase_ , help="Pretrained tokenizer name or path if not the same as model_name" , )
parser.add_argument(
"--cache_dir" , default=str(Path(UpperCAmelCase_ ).parent / "test_run" / "cache" ) , type=UpperCAmelCase_ , help="Where do you want to store the pre-trained models downloaded from huggingface.co" , )
parser.add_argument(
"--encoder_layerdrop" , type=UpperCAmelCase_ , help="Encoder layer dropout probability (Optional). Goes into model.config" , )
parser.add_argument(
"--decoder_layerdrop" , type=UpperCAmelCase_ , help="Decoder layer dropout probability (Optional). Goes into model.config" , )
parser.add_argument(
"--dropout" , type=UpperCAmelCase_ , help="Dropout probability (Optional). Goes into model.config" , )
parser.add_argument(
"--attention_dropout" , type=UpperCAmelCase_ , help="Attention dropout probability (Optional). Goes into model.config" , )
parser.add_argument("--learning_rate" , default=5E-5 , type=UpperCAmelCase_ , help="The initial learning rate for Adam." )
parser.add_argument(
"--lr_scheduler" , default="linear" , choices=UpperCAmelCase_ , metavar=UpperCAmelCase_ , type=UpperCAmelCase_ , help="Learning rate scheduler" , )
parser.add_argument("--weight_decay" , default=0.0 , type=UpperCAmelCase_ , help="Weight decay if we apply some." )
parser.add_argument("--adam_epsilon" , default=1E-8 , type=UpperCAmelCase_ , help="Epsilon for Adam optimizer." )
parser.add_argument("--warmup_steps" , default=0 , type=UpperCAmelCase_ , help="Linear warmup over warmup_steps." )
parser.add_argument("--num_workers" , default=4 , type=UpperCAmelCase_ , help="kwarg passed to DataLoader" )
parser.add_argument("--num_train_epochs" , dest="max_epochs" , default=3 , type=UpperCAmelCase_ )
parser.add_argument("--train_batch_size" , default=3_2 , type=UpperCAmelCase_ )
parser.add_argument("--eval_batch_size" , default=3_2 , type=UpperCAmelCase_ )
parser.add_argument("--adafactor" , action="store_true" )
class InitCallback(pl.Callback):
    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelerators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.


class CheckParamCallback(pl.Callback):
    # check whether new added model parameters are differentiable
    def on_after_backward(self, trainer, pl_module):
        # print(pl_module.model.rag)
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)


class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer, pl_module):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer, pl_module):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    parser.add_argument(
        "--output_dir",
        default=str(Path(__file__).parent / "test_run" / "model_checkpoints"),
        type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        dest="accumulate_grad_batches",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir",
        default=str(Path(__file__).parent / "test_run" / "dummy-train-data"),
        type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )


def generic_train(
    model,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,  # can pass WandbLogger() here
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}
    if args.fp16:
        train_params["precision"] = 16
    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"
    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)
    else:
        print("RAG modeling tests with new set functions successfully executed!")
    return trainer
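# Hypothetical wiring (an addition; MyTaskModule is a placeholder subclass of
# the LightningModule above):
#   parser = argparse.ArgumentParser()
#   add_generic_args(parser, os.getcwd())
#   args = parser.parse_args()
#   trainer = generic_train(MyTaskModule(args), args)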
| 473 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def get_resize_output_image_size(
    input_image: np.ndarray,
    output_size: Union[int, Iterable[int]],
    keep_aspect_ratio: bool,
    multiple: int,
) -> Tuple[int, int]:
    """simple docstring"""

    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
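# Worked example (an addition; numbers are illustrative): resizing a 480x640
# image to (384, 384) with keep_aspect_ratio=True and multiple=32 gives
# scale_height = 0.8 and scale_width = 0.6; 0.8 deviates less from 1, so both
# dimensions use 0.8, and rounding 0.8*480 and 0.8*640 to multiples of 32
# yields (384, 512).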
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Any = ['''pixel_values''']
def __init__( self : Any , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Union[int, float] = 1 / 255 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , **UpperCAmelCase_ : str , ):
super().__init__(**UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Dict = size if size is not None else {"height": 384, "width": 384}
SCREAMING_SNAKE_CASE : List[Any] = get_size_dict(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = do_resize
SCREAMING_SNAKE_CASE : Optional[Any] = size
SCREAMING_SNAKE_CASE : str = keep_aspect_ratio
SCREAMING_SNAKE_CASE : int = ensure_multiple_of
SCREAMING_SNAKE_CASE : Any = resample
SCREAMING_SNAKE_CASE : List[str] = do_rescale
SCREAMING_SNAKE_CASE : Tuple = rescale_factor
SCREAMING_SNAKE_CASE : Union[str, Any] = do_normalize
SCREAMING_SNAKE_CASE : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _A ( self : Tuple , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : int , ):
SCREAMING_SNAKE_CASE : List[str] = get_size_dict(UpperCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
SCREAMING_SNAKE_CASE : Optional[Any] = get_resize_output_image_size(
UpperCAmelCase_ , output_size=(size["height"], size["width"]) , keep_aspect_ratio=UpperCAmelCase_ , multiple=UpperCAmelCase_ , )
return resize(UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_ )
def _A ( self : Optional[Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[int, float] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Dict , ):
return rescale(UpperCAmelCase_ , scale=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_ )
def _A ( self : Dict , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : List[str] , ):
return normalize(UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_ )
def _A ( self : Optional[Any] , UpperCAmelCase_ : ImageInput , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : int = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : int = None , UpperCAmelCase_ : PILImageResampling = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : float = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , UpperCAmelCase_ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase_ : Optional[Any] , ):
SCREAMING_SNAKE_CASE : Optional[int] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE : Dict = size if size is not None else self.size
SCREAMING_SNAKE_CASE : Optional[int] = get_size_dict(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
SCREAMING_SNAKE_CASE : Optional[Any] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
SCREAMING_SNAKE_CASE : Any = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE : Dict = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE : int = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE : Tuple = image_std if image_std is not None else self.image_std
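        # Accept either a single image or a batch; normalize the input to a list of images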
SCREAMING_SNAKE_CASE : List[str] = make_list_of_images(UpperCAmelCase_ )
if not valid_images(UpperCAmelCase_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE : Any = [to_numpy_array(UpperCAmelCase_ ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE : Tuple = [self.resize(image=UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_ ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE : Tuple = [self.rescale(image=UpperCAmelCase_ , scale=UpperCAmelCase_ ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE : Optional[Any] = [self.normalize(image=UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ ) for image in images]
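        # Convert every image to the requested channel layout (channels-first by default) before batching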
SCREAMING_SNAKE_CASE : Optional[Any] = [to_channel_dimension_format(UpperCAmelCase_ , UpperCAmelCase_ ) for image in images]
SCREAMING_SNAKE_CASE : Optional[Any] = {"pixel_values": images}
return BatchFeature(data=UpperCAmelCase_ , tensor_type=UpperCAmelCase_ )
def _A ( self : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Tuple] = None ):
SCREAMING_SNAKE_CASE : int = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
            if len(logits) != len(target_sizes):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits" )
if is_torch_tensor(UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE : List[Any] = target_sizes.numpy()
SCREAMING_SNAKE_CASE : str = []
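            # Upsample each logit map to its target size, then take the per-pixel argmax over classes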
for idx in range(len(UpperCAmelCase_ ) ):
SCREAMING_SNAKE_CASE : Dict = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : str = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(UpperCAmelCase_ )
else:
SCREAMING_SNAKE_CASE : str = logits.argmax(dim=1 )
SCREAMING_SNAKE_CASE : str = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 62 | 0 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase : List[Any] = logging.get_logger(__name__)
lowercase : Any = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
lowercase : str = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def __a ( A__ ) -> Optional[Any]:
lowerCAmelCase = torch.load(A__ , map_location="cpu" )
return sd
def __a ( A__ , A__ , A__=rename_keys_prefix ) -> Optional[int]:
lowerCAmelCase = OrderedDict()
lowerCAmelCase = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
lowerCAmelCase = key
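        # Apply every (old_prefix, new_prefix) rule from the rename table in order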
for name_pair in rename_keys_prefix:
lowerCAmelCase = new_key.replace(name_pair[0] , name_pair[1] )
lowerCAmelCase = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`, but was added separately
lowerCAmelCase = new_d["cls.predictions.bias"]
return new_d
@torch.no_grad()
def __a ( A__ , A__ ) -> int:
assert (
checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS
), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."
# Get Config
if "pre" in checkpoint_path:
lowerCAmelCase = "pretraining"
if "vcr" in checkpoint_path:
lowerCAmelCase = {"visual_embedding_dim": 512}
elif "vqa_advanced" in checkpoint_path:
lowerCAmelCase = {"visual_embedding_dim": 2048}
elif "vqa" in checkpoint_path:
lowerCAmelCase = {"visual_embedding_dim": 2048}
elif "nlvr" in checkpoint_path:
lowerCAmelCase = {"visual_embedding_dim": 1024}
else:
raise NotImplementedError(f"No implementation found for `{checkpoint_path}`." )
else:
if "vcr" in checkpoint_path:
lowerCAmelCase = {"visual_embedding_dim": 512}
lowerCAmelCase = "multichoice"
elif "vqa_advanced" in checkpoint_path:
lowerCAmelCase = {"visual_embedding_dim": 2048}
lowerCAmelCase = "vqa_advanced"
elif "vqa" in checkpoint_path:
lowerCAmelCase = {"visual_embedding_dim": 2048, "num_labels": 3129}
lowerCAmelCase = "vqa"
elif "nlvr" in checkpoint_path:
lowerCAmelCase = {
"visual_embedding_dim": 1024,
"num_labels": 2,
}
lowerCAmelCase = "nlvr"
lowerCAmelCase = VisualBertConfig(**A__ )
# Load State Dict
lowerCAmelCase = load_state_dict(A__ )
lowerCAmelCase = get_new_dict(A__ , A__ )
if model_type == "pretraining":
lowerCAmelCase = VisualBertForPreTraining(A__ )
elif model_type == "vqa":
lowerCAmelCase = VisualBertForQuestionAnswering(A__ )
elif model_type == "nlvr":
lowerCAmelCase = VisualBertForVisualReasoning(A__ )
elif model_type == "multichoice":
lowerCAmelCase = VisualBertForMultipleChoice(A__ )
model.load_state_dict(A__ )
# Save Checkpoints
Path(A__ ).mkdir(exist_ok=A__ )
model.save_pretrained(A__ )
if __name__ == "__main__":
lowercase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
lowercase : int = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 649 |
from __future__ import annotations
from typing import Any
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : List[str] , UpperCAmelCase_ : int = 6 ):
SCREAMING_SNAKE_CASE : Node | None = None
SCREAMING_SNAKE_CASE : Node | None = None
self.create_linked_list(UpperCAmelCase_ )
def _A ( self : List[Any] , UpperCAmelCase_ : int ):
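        # Build a circular doubly linked list with the requested number of empty nodes; front and rear start at the same node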
SCREAMING_SNAKE_CASE : Optional[int] = Node()
SCREAMING_SNAKE_CASE : str = current_node
SCREAMING_SNAKE_CASE : Optional[int] = current_node
SCREAMING_SNAKE_CASE : Optional[Any] = current_node
for _ in range(1 , UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE : Tuple = Node()
SCREAMING_SNAKE_CASE : Dict = current_node
SCREAMING_SNAKE_CASE : Optional[Any] = previous_node
SCREAMING_SNAKE_CASE : Optional[Any] = current_node
SCREAMING_SNAKE_CASE : Union[str, Any] = self.front
SCREAMING_SNAKE_CASE : List[str] = previous_node
def _A ( self : Union[str, Any] ):
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
def _A ( self : Optional[int] ):
self.check_can_perform_operation()
return self.front.data if self.front else None
def _A ( self : Optional[int] , UpperCAmelCase_ : Any ):
if self.rear is None:
return
self.check_is_full()
if not self.is_empty():
SCREAMING_SNAKE_CASE : List[str] = self.rear.next
if self.rear:
SCREAMING_SNAKE_CASE : Dict = data
def _A ( self : List[str] ):
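        # Dequeue: return the front node's data, clear it, and advance the front pointer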
self.check_can_perform_operation()
if self.rear is None or self.front is None:
return None
if self.front == self.rear:
SCREAMING_SNAKE_CASE : List[str] = self.front.data
SCREAMING_SNAKE_CASE : Optional[int] = None
return data
SCREAMING_SNAKE_CASE : List[str] = self.front
SCREAMING_SNAKE_CASE : List[str] = old_front.next
SCREAMING_SNAKE_CASE : Optional[int] = old_front.data
SCREAMING_SNAKE_CASE : List[str] = None
return data
def _A ( self : Any ):
if self.is_empty():
raise Exception("Empty Queue" )
def _A ( self : Optional[Any] ):
if self.rear and self.rear.next == self.front:
raise Exception("Full Queue" )
class Node:
    '''simple docstring'''
    def __init__( self : Union[str, Any] ):
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
| 62 | 0 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
SCREAMING_SNAKE_CASE :Optional[int] = logging.get_logger(__name__)
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : Optional[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
snake_case_ = question_encoder
snake_case_ = generator
snake_case_ = self.question_encoder
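        # The question-encoder tokenizer is active by default; the helper methods below switch to the generator's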
def lowerCAmelCase__ ( self : Union[str, Any] , _lowerCAmelCase : Any ) -> Any:
"""simple docstring"""
if os.path.isfile(UpperCAmelCase_ ):
raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
snake_case_ = os.path.join(UpperCAmelCase_ , "question_encoder_tokenizer" )
snake_case_ = os.path.join(UpperCAmelCase_ , "generator_tokenizer" )
self.question_encoder.save_pretrained(UpperCAmelCase_ )
self.generator.save_pretrained(UpperCAmelCase_ )
@classmethod
def lowerCAmelCase__ ( cls : List[Any] , _lowerCAmelCase : List[Any] , **_lowerCAmelCase : List[str] ) -> str:
"""simple docstring"""
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
snake_case_ = kwargs.pop("config" , UpperCAmelCase_ )
if config is None:
snake_case_ = RagConfig.from_pretrained(UpperCAmelCase_ )
snake_case_ = AutoTokenizer.from_pretrained(
UpperCAmelCase_ , config=config.question_encoder , subfolder="question_encoder_tokenizer" )
snake_case_ = AutoTokenizer.from_pretrained(
UpperCAmelCase_ , config=config.generator , subfolder="generator_tokenizer" )
return cls(question_encoder=UpperCAmelCase_ , generator=UpperCAmelCase_ )
def __call__( self : Optional[Any] , *_lowerCAmelCase : int , **_lowerCAmelCase : List[str] ) -> Optional[Any]:
"""simple docstring"""
return self.current_tokenizer(*UpperCAmelCase_ , **UpperCAmelCase_ )
def lowerCAmelCase__ ( self : Optional[int] , *_lowerCAmelCase : Union[str, Any] , **_lowerCAmelCase : Tuple ) -> List[Any]:
"""simple docstring"""
return self.generator.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
def lowerCAmelCase__ ( self : Union[str, Any] , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : int ) -> Tuple:
"""simple docstring"""
return self.generator.decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
def lowerCAmelCase__ ( self : List[Any] ) -> int:
"""simple docstring"""
snake_case_ = self.question_encoder
def lowerCAmelCase__ ( self : Dict ) -> Any:
"""simple docstring"""
snake_case_ = self.generator
def lowerCAmelCase__ ( self : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[List[str]] = None , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : str = "longest" , _lowerCAmelCase : str = None , _lowerCAmelCase : bool = True , **_lowerCAmelCase : Optional[Any] , ) -> Optional[Any]:
"""simple docstring"""
warnings.warn(
"`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
"regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
"context manager to prepare your targets. See the documentation of your specific tokenizer for more "
"details" , UpperCAmelCase_ , )
if max_length is None:
snake_case_ = self.current_tokenizer.model_max_length
snake_case_ = self(
UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , **UpperCAmelCase_ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
snake_case_ = self.current_tokenizer.model_max_length
snake_case_ = self(
text_target=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , padding=UpperCAmelCase_ , max_length=UpperCAmelCase_ , truncation=UpperCAmelCase_ , **UpperCAmelCase_ , )
snake_case_ = labels["input_ids"]
return model_inputs
| 283 |
def xnor_gate(input_a, input_b):
    """Return 1 if both inputs are equal (logical XNOR), else 0."""
    return 1 if input_a == input_b else 0
def test_xnor_gate():
"""simple docstring"""
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 62 | 0 |
"""simple docstring"""
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
A_ : Any = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def A ( snake_case__ , snake_case__ , snake_case__ = 1_60_00 ):
'''simple docstring'''
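    # Return the clip unchanged if it already fits; otherwise take a random fixed-length crop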
SCREAMING_SNAKE_CASE__ = int(round(sample_rate * max_length ) )
if len(snake_case__ ) <= sample_length:
return wav
SCREAMING_SNAKE_CASE__ = randint(0 , len(snake_case__ ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
@dataclass
class lowerCamelCase :
lowerCamelCase__ : Optional[str] = field(default=A__ ,metadata={'help': 'Name of a dataset from the datasets package'} )
lowerCamelCase__ : Optional[str] = field(
default=A__ ,metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
lowerCamelCase__ : Optional[str] = field(
default=A__ ,metadata={'help': 'A file containing the training audio paths and labels.'} )
lowerCamelCase__ : Optional[str] = field(
default=A__ ,metadata={'help': 'A file containing the validation audio paths and labels.'} )
lowerCamelCase__ : str = field(
default='train' ,metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} ,)
lowerCamelCase__ : str = field(
default='validation' ,metadata={
'help': (
'The name of the training data set split to use (via the datasets library). Defaults to \'validation\''
)
} ,)
lowerCamelCase__ : str = field(
default='audio' ,metadata={'help': 'The name of the dataset column containing the audio data. Defaults to \'audio\''} ,)
lowerCamelCase__ : str = field(
default='label' ,metadata={'help': 'The name of the dataset column containing the labels. Defaults to \'label\''} )
lowerCamelCase__ : Optional[int] = field(
default=A__ ,metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} ,)
lowerCamelCase__ : Optional[int] = field(
default=A__ ,metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} ,)
lowerCamelCase__ : float = field(
default=2_0 ,metadata={'help': 'Audio clips will be randomly cut to this length during training if the value is set.'} ,)
@dataclass
class lowerCamelCase :
lowerCamelCase__ : str = field(
default='facebook/wav2vec2-base' ,metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ,)
lowerCamelCase__ : Optional[str] = field(
default=A__ ,metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowerCamelCase__ : Optional[str] = field(
default=A__ ,metadata={'help': 'Where do you want to store the pretrained models downloaded from the Hub'} )
lowerCamelCase__ : str = field(
default='main' ,metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} ,)
lowerCamelCase__ : Optional[str] = field(
default=A__ ,metadata={'help': 'Name or path of preprocessor config.'} )
lowerCamelCase__ : bool = field(
default=A__ ,metadata={'help': 'Whether to freeze the feature encoder layers of the model.'} )
lowerCamelCase__ : bool = field(
default=A__ ,metadata={'help': 'Whether to generate an attention mask in the feature extractor.'} )
lowerCamelCase__ : bool = field(
default=A__ ,metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} ,)
lowerCamelCase__ : Optional[bool] = field(
default=A__ ,metadata={'help': 'Whether to freeze the feature extractor layers of the model.'} )
lowerCamelCase__ : bool = field(
default=A__ ,metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} ,)
def SCREAMING_SNAKE_CASE ( self : int ) -> str:
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
"""The argument `--freeze_feature_extractor` is deprecated and """
"""will be removed in a future version. Use `--freeze_feature_encoder`"""
"""instead. Setting `freeze_feature_encoder==True`.""" , UpperCAmelCase_ , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
"""The argument `--freeze_feature_extractor` is deprecated and """
"""should not be used in combination with `--freeze_feature_encoder`."""
"""Only make use of `--freeze_feature_encoder`.""" )
def A ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
SCREAMING_SNAKE_CASE__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
SCREAMING_SNAKE_CASE__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_audio_classification""" , snake_case__ , snake_case__ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = training_args.get_process_log_level()
logger.setLevel(snake_case__ )
transformers.utils.logging.set_verbosity(snake_case__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
SCREAMING_SNAKE_CASE__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
SCREAMING_SNAKE_CASE__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to train from scratch.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset and prepare it for the audio classification task.
SCREAMING_SNAKE_CASE__ = DatasetDict()
SCREAMING_SNAKE_CASE__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
SCREAMING_SNAKE_CASE__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"""--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. """
"""Make sure to set `--audio_column_name` to the correct audio column - one of """
f"""{", ".join(raw_datasets["train"].column_names )}.""" )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"""--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. """
"""Make sure to set `--label_column_name` to the correct text column - one of """
f"""{", ".join(raw_datasets["train"].column_names )}.""" )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
SCREAMING_SNAKE_CASE__ = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
SCREAMING_SNAKE_CASE__ = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
SCREAMING_SNAKE_CASE__ = feature_extractor.model_input_names[0]
def train_transforms(snake_case__ ):
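        # Training-time augmentation: randomly subsample each waveform before feature extraction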
SCREAMING_SNAKE_CASE__ = []
for audio in batch[data_args.audio_column_name]:
SCREAMING_SNAKE_CASE__ = random_subsample(
audio["""array"""] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(snake_case__ )
SCREAMING_SNAKE_CASE__ = feature_extractor(snake_case__ , sampling_rate=feature_extractor.sampling_rate )
SCREAMING_SNAKE_CASE__ = {model_input_name: inputs.get(snake_case__ )}
SCREAMING_SNAKE_CASE__ = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(snake_case__ ):
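        # Evaluation preprocessing: extract features from the full, untruncated waveform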
SCREAMING_SNAKE_CASE__ = [audio["array"] for audio in batch[data_args.audio_column_name]]
SCREAMING_SNAKE_CASE__ = feature_extractor(snake_case__ , sampling_rate=feature_extractor.sampling_rate )
SCREAMING_SNAKE_CASE__ = {model_input_name: inputs.get(snake_case__ )}
SCREAMING_SNAKE_CASE__ = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
SCREAMING_SNAKE_CASE__ = raw_datasets["train"].features[data_args.label_column_name].names
SCREAMING_SNAKE_CASE__ = {}, {}
for i, label in enumerate(snake_case__ ):
SCREAMING_SNAKE_CASE__ = str(snake_case__ )
SCREAMING_SNAKE_CASE__ = label
# Load the accuracy metric from the datasets package
SCREAMING_SNAKE_CASE__ = evaluate.load("""accuracy""" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(snake_case__ ):
SCREAMING_SNAKE_CASE__ = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=snake_case__ , references=eval_pred.label_ids )
SCREAMING_SNAKE_CASE__ = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(snake_case__ ) , labelaid=snake_case__ , idalabel=snake_case__ , finetuning_task="""audio-classification""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
SCREAMING_SNAKE_CASE__ = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=snake_case__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
SCREAMING_SNAKE_CASE__ = (
raw_datasets["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(snake_case__ , output_all_columns=snake_case__ )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
SCREAMING_SNAKE_CASE__ = (
raw_datasets["eval"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(snake_case__ , output_all_columns=snake_case__ )
# Initialize our trainer
SCREAMING_SNAKE_CASE__ = Trainer(
model=snake_case__ , args=snake_case__ , train_dataset=raw_datasets["""train"""] if training_args.do_train else None , eval_dataset=raw_datasets["""eval"""] if training_args.do_eval else None , compute_metrics=snake_case__ , tokenizer=snake_case__ , )
# Training
if training_args.do_train:
SCREAMING_SNAKE_CASE__ = None
if training_args.resume_from_checkpoint is not None:
SCREAMING_SNAKE_CASE__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
SCREAMING_SNAKE_CASE__ = last_checkpoint
SCREAMING_SNAKE_CASE__ = trainer.train(resume_from_checkpoint=snake_case__ )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
SCREAMING_SNAKE_CASE__ = trainer.evaluate()
trainer.log_metrics("""eval""" , snake_case__ )
trainer.save_metrics("""eval""" , snake_case__ )
# Write model card and (optionally) push to hub
SCREAMING_SNAKE_CASE__ = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "audio-classification",
"dataset": data_args.dataset_name,
"tags": ["audio-classification"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**snake_case__ )
else:
trainer.create_model_card(**snake_case__ )
if __name__ == "__main__":
main()
| 196 |
import math
import flax.linen as nn
import jax.numpy as jnp
def lowerCamelCase__ ( lowercase , lowercase , lowercase = 1 , lowercase = 1 , lowercase = 1.0E4 , lowercase = False , lowercase = 1.0 , ):
"""simple docstring"""
assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
assert embedding_dim % 2 == 0, F'''Embedding dimension {embedding_dim} should be even'''
SCREAMING_SNAKE_CASE : Union[str, Any] = float(embedding_dim // 2 )
SCREAMING_SNAKE_CASE : Dict = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
SCREAMING_SNAKE_CASE : Optional[Any] = min_timescale * jnp.exp(jnp.arange(lowercase , dtype=jnp.floataa ) * -log_timescale_increment )
SCREAMING_SNAKE_CASE : Optional[int] = jnp.expand_dims(lowercase , 1 ) * jnp.expand_dims(lowercase , 0 )
# scale embeddings
SCREAMING_SNAKE_CASE : Optional[int] = scale * emb
if flip_sin_to_cos:
SCREAMING_SNAKE_CASE : List[Any] = jnp.concatenate([jnp.cos(lowercase ), jnp.sin(lowercase )] , axis=1 )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.concatenate([jnp.sin(lowercase ), jnp.cos(lowercase )] , axis=1 )
SCREAMING_SNAKE_CASE : Tuple = jnp.reshape(lowercase , [jnp.shape(lowercase )[0], embedding_dim] )
return signal
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
UpperCamelCase_ : int = 3_2
UpperCamelCase_ : jnp.dtype = jnp.floataa
@nn.compact
def __call__( self : Tuple , UpperCAmelCase_ : str ):
SCREAMING_SNAKE_CASE : Any = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="linear_1" )(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.silu(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Any = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="linear_2" )(UpperCAmelCase_ )
return temb
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
UpperCamelCase_ : int = 3_2
UpperCamelCase_ : bool = False
UpperCamelCase_ : float = 1
@nn.compact
def __call__( self : Optional[int] , UpperCAmelCase_ : int ):
return get_sinusoidal_embeddings(
UpperCAmelCase_ , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
| 62 | 0 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def A__ ( self ):
UpperCAmelCase_ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(UpperCAmelCase_ , "tf_padding" ) )
self.parent.assertTrue(hasattr(UpperCAmelCase_ , "depth_multiplier" ) )
class lowerCamelCase :
'''simple docstring'''
def __init__( self , lowerCAmelCase , lowerCAmelCase=13 , lowerCAmelCase=3 , lowerCAmelCase=32 , lowerCAmelCase=0.25 , lowerCAmelCase=8 , lowerCAmelCase=8 , lowerCAmelCase=6 , lowerCAmelCase=32 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase="relu6" , lowerCAmelCase=1280 , lowerCAmelCase=0.1 , lowerCAmelCase=0.02 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=10 , lowerCAmelCase=None , ):
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = image_size
UpperCAmelCase_ = depth_multiplier
UpperCAmelCase_ = depth_divisible_by
UpperCAmelCase_ = min_depth
UpperCAmelCase_ = expand_ratio
UpperCAmelCase_ = tf_padding
UpperCAmelCase_ = output_stride
UpperCAmelCase_ = first_layer_is_expansion
UpperCAmelCase_ = finegrained_output
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
UpperCAmelCase_ = classifier_dropout_prob
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = is_training
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = scope
def A__ ( self ):
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ = None
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCAmelCase_ = self.get_config()
return config, pixel_values, labels, pixel_labels
def A__ ( self ):
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = MobileNetVaModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
UpperCAmelCase_ = model(UpperCAmelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = MobileNetVaForImageClassification(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
UpperCAmelCase_ = model(UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = MobileNetVaForSemanticSegmentation(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
UpperCAmelCase_ = model(UpperCAmelCase_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
UpperCAmelCase_ = model(UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def A__ ( self ):
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase ( lowercase__, lowercase__, unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : Any = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
lowerCAmelCase_ : List[Any] = (
{
'''feature-extraction''': MobileNetVaModel,
'''image-classification''': MobileNetVaForImageClassification,
'''image-segmentation''': MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCAmelCase_ : Any = False
lowerCAmelCase_ : List[str] = False
lowerCAmelCase_ : int = False
lowerCAmelCase_ : str = False
def A__ ( self ):
UpperCAmelCase_ = MobileNetVaModelTester(self )
UpperCAmelCase_ = MobileNetVaConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ )
def A__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileNetV2 does not use inputs_embeds" )
def A__ ( self ):
pass
@unittest.skip(reason="MobileNetV2 does not support input and output embeddings" )
def A__ ( self ):
pass
@unittest.skip(reason="MobileNetV2 does not output attentions" )
def A__ ( self ):
pass
def A__ ( self ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(UpperCAmelCase_ )
UpperCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCAmelCase_ )
def A__ ( self ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def A__ ( self ):
def check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) )
UpperCAmelCase_ = outputs.hidden_states
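            # MobileNetV2 is expected to expose 16 hidden-state tensors here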
UpperCAmelCase_ = 16
self.assertEqual(len(UpperCAmelCase_ ) , UpperCAmelCase_ )
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ = True
check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
def A__ ( self ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_ )
def A__ ( self ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*UpperCAmelCase_ )
@slow
def A__ ( self ):
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = MobileNetVaModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
def snake_case__ ( ) -> str:
UpperCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A__ ( self ):
return (
MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224" ) if is_vision_available() else None
)
@slow
def A__ ( self ):
UpperCAmelCase_ = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224" ).to(UpperCAmelCase_ )
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=UpperCAmelCase_ , return_tensors="pt" ).to(UpperCAmelCase_ )
# forward pass
with torch.no_grad():
UpperCAmelCase_ = model(**UpperCAmelCase_ )
# verify the logits
UpperCAmelCase_ = torch.Size((1, 1001) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase_ )
UpperCAmelCase_ = torch.tensor([0.2445, -1.1993, 0.1905] ).to(UpperCAmelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1e-4 ) )
@slow
def A__ ( self ):
UpperCAmelCase_ = MobileNetVaForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513" )
UpperCAmelCase_ = model.to(UpperCAmelCase_ )
UpperCAmelCase_ = MobileNetVaImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513" )
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=UpperCAmelCase_ , return_tensors="pt" ).to(UpperCAmelCase_ )
# forward pass
with torch.no_grad():
UpperCAmelCase_ = model(**UpperCAmelCase_ )
UpperCAmelCase_ = outputs.logits
# verify the logits
UpperCAmelCase_ = torch.Size((1, 21, 65, 65) )
self.assertEqual(logits.shape , UpperCAmelCase_ )
UpperCAmelCase_ = torch.tensor(
[
[[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
[[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
[[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
] , device=UpperCAmelCase_ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCAmelCase_ , atol=1e-4 ) )
| 579 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Optional[Any] = (
'''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'''
'''It takes two arguments named `image` which should be the original image, and `label` which should be a text '''
'''describing the elements what should be identified in the segmentation mask. The tool returns the mask.'''
)
UpperCamelCase_ : Union[str, Any] = '''CIDAS/clipseg-rd64-refined'''
UpperCamelCase_ : Any = '''image_segmenter'''
UpperCamelCase_ : int = CLIPSegForImageSegmentation
UpperCamelCase_ : Optional[Any] = ['''image''', '''text''']
UpperCamelCase_ : int = ['''image''']
def __init__( self : str , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(self , ["vision"] )
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ )
def _A ( self : Tuple , UpperCAmelCase_ : "Image" , UpperCAmelCase_ : str ):
return self.pre_processor(text=[label] , images=[image] , padding=UpperCAmelCase_ , return_tensors="pt" )
def _A ( self : str , UpperCAmelCase_ : Optional[Any] ):
with torch.no_grad():
SCREAMING_SNAKE_CASE : str = self.model(**UpperCAmelCase_ ).logits
return logits
def _A ( self : Union[str, Any] , UpperCAmelCase_ : List[str] ):
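        # Binarize the logits into a 0/1 mask, then render it as an 8-bit grayscale image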
        array = outputs.cpu().detach().numpy()
        array[array < 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
| 62 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
_a : Tuple = logging.get_logger(__name__)
_a : Optional[int] = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
_a : Tuple = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ,_lowerCamelCase : List[str] ,_lowerCamelCase : List[Any] ,_lowerCamelCase : Dict ,_lowerCamelCase : Any ) -> str:
for attribute in key.split(""".""" ):
_lowerCAmelCase : str = getattr(_lowerCamelCase ,_lowerCamelCase )
if weight_type is not None:
_lowerCAmelCase : List[str] = getattr(_lowerCamelCase ,_lowerCamelCase ).shape
else:
_lowerCAmelCase : str = hf_pointer.shape
assert hf_shape == value.shape, (
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}"
)
if weight_type == "weight":
_lowerCAmelCase : Union[str, Any] = value
elif weight_type == "weight_g":
_lowerCAmelCase : Any = value
elif weight_type == "weight_v":
_lowerCAmelCase : List[str] = value
elif weight_type == "bias":
_lowerCAmelCase : int = value
else:
_lowerCAmelCase : Optional[int] = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ,_lowerCamelCase : Any ) -> str:
_lowerCAmelCase : Any = []
_lowerCAmelCase : str = fairseq_model.state_dict()
_lowerCAmelCase : str = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
_lowerCAmelCase : List[str] = None
for name, value in fairseq_dict.items():
_lowerCAmelCase : List[Any] = False
if "conv_layers" in name:
load_conv_layer(
_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,hf_model.config.feat_extract_norm == """group""" ,)
_lowerCAmelCase : Optional[int] = True
elif name.split(""".""" )[0] == "proj":
_lowerCAmelCase : Optional[Any] = fairseq_model.proj
_lowerCAmelCase : str = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
_lowerCAmelCase : Dict = True
if "*" in mapped_key:
_lowerCAmelCase : Dict = name.split(_lowerCamelCase )[0].split(""".""" )[-2]
_lowerCAmelCase : Optional[Any] = mapped_key.replace("""*""" ,_lowerCamelCase )
if "weight_g" in name:
_lowerCAmelCase : List[str] = "weight_g"
elif "weight_v" in name:
_lowerCAmelCase : Optional[Any] = "weight_v"
elif "bias" in name:
_lowerCAmelCase : Tuple = "bias"
elif "weight" in name:
_lowerCAmelCase : Union[str, Any] = "weight"
else:
_lowerCAmelCase : Dict = None
set_recursively(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
continue
if not is_used:
unused_weights.append(_lowerCamelCase )
logger.warning(f"Unused weights: {unused_weights}" )
return proj_weight
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ,_lowerCamelCase : int ,_lowerCamelCase : Optional[Any] ,_lowerCamelCase : Optional[Any] ,_lowerCamelCase : Dict ) -> List[Any]:
_lowerCAmelCase : int = full_name.split("""conv_layers.""" )[-1]
_lowerCAmelCase : Tuple = name.split(""".""" )
_lowerCAmelCase : int = int(items[0] )
_lowerCAmelCase : Tuple = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
_lowerCAmelCase : Tuple = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
_lowerCAmelCase : List[str] = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
_lowerCAmelCase : int = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
_lowerCAmelCase : Optional[int] = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ) -> Tuple:
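    # Build a linear output layer that reuses the embedding matrix as its weights (weight tying)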
_lowerCAmelCase : Any = emb.weight.shape
_lowerCAmelCase : Optional[int] = nn.Linear(_lowerCamelCase ,_lowerCamelCase ,bias=_lowerCamelCase )
_lowerCAmelCase : str = emb.weight.data
return lin_layer
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ) -> List[str]:
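    # Build the vocab mapping: the four fairseq special tokens first, then dictionary words in file order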
with open(_lowerCamelCase ,"""r""" ,encoding="""utf-8""" ) as f:
_lowerCAmelCase : List[Any] = f.readlines()
_lowerCAmelCase : Union[str, Any] = [line.split(""" """ )[0] for line in lines]
_lowerCAmelCase : Optional[int] = len(_lowerCamelCase )
_lowerCAmelCase : List[Any] = {
"<s>": 0,
"<pad>": 1,
"</s>": 2,
"<unk>": 3,
}
vocab_dict.update(dict(zip(_lowerCamelCase ,range(4 ,num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[int] ,_lowerCamelCase : List[str] ,_lowerCamelCase : Dict ,_lowerCamelCase : Optional[int] ,_lowerCamelCase : Optional[int] ,_lowerCamelCase : int ,_lowerCamelCase : Optional[Any] ,) -> Dict:
_lowerCAmelCase : List[Any] = WavaVecaConfig.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : Dict = SpeechaTextaConfig.from_pretrained(
_lowerCamelCase ,vocab_size=_lowerCamelCase ,decoder_layers=_lowerCamelCase ,do_stable_layer_norm=_lowerCamelCase )
_lowerCAmelCase : str = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=16000 ,padding_value=0 ,do_normalize=_lowerCamelCase ,return_attention_mask=_lowerCamelCase ,)
_lowerCAmelCase : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
_lowerCAmelCase : Dict = model[0].eval()
# set weights for wav2vec2 encoder
_lowerCAmelCase : str = WavaVecaModel(_lowerCamelCase )
_lowerCAmelCase : List[str] = recursively_load_weights_wavaveca(model.encoder ,_lowerCamelCase )
_lowerCAmelCase : Tuple = SpeechaTextaForCausalLM(_lowerCamelCase )
_lowerCAmelCase : Any = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() ,strict=_lowerCamelCase )
# set output linear layer
unexpected_keys.remove("""embed_out""" )
_lowerCAmelCase : List[str] = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}" )
logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" )
_lowerCAmelCase : Dict = SpeechEncoderDecoderModel(encoder=_lowerCamelCase ,decoder=_lowerCamelCase )
_lowerCAmelCase : Optional[int] = False
# add projection layer
_lowerCAmelCase : Any = nn.Parameter(projection_layer.weight )
_lowerCAmelCase : Union[str, Any] = nn.Parameter(projection_layer.bias )
_lowerCAmelCase : Dict = create_vocab_dict(_lowerCamelCase )
with open(os.path.join(_lowerCamelCase ,"""vocab.json""" ) ,"""w""" ) as fp:
json.dump(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : Tuple = SpeechaTextaTokenizer(os.path.join(_lowerCamelCase ,"""vocab.json""" ) )
tokenizer.save_pretrained(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = hf_wavavec.config.to_dict()
_lowerCAmelCase : Tuple = tokenizer.pad_token_id
_lowerCAmelCase : Any = tokenizer.bos_token_id
_lowerCAmelCase : str = tokenizer.eos_token_id
_lowerCAmelCase : int = "speech_to_text_2"
_lowerCAmelCase : List[str] = "wav2vec2"
_lowerCAmelCase : Dict = SpeechEncoderDecoderConfig.from_dict(_lowerCamelCase )
hf_wavavec.save_pretrained(_lowerCamelCase )
feature_extractor.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
_a : List[Any] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
parser.add_argument('--vocab_size', default=10_224, type=int, help='Vocab size of decoder')
parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
_a : Optional[int] = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 213 |
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE ( lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase_ : Optional[Any] = BlenderbotSmallTokenizer
UpperCamelCase_ : int = False
def _A ( self : Union[str, Any] ):
super().setUp()
SCREAMING_SNAKE_CASE : List[Any] = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
SCREAMING_SNAKE_CASE : Optional[Any] = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
SCREAMING_SNAKE_CASE : int = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
SCREAMING_SNAKE_CASE : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(UpperCAmelCase_ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(UpperCAmelCase_ ) )
def _A ( self : List[Any] , **UpperCAmelCase_ : str ):
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def _A ( self : Optional[int] , UpperCAmelCase_ : Dict ):
SCREAMING_SNAKE_CASE : Tuple = "adapt act apte"
SCREAMING_SNAKE_CASE : int = "adapt act apte"
return input_text, output_text
def _A ( self : str ):
SCREAMING_SNAKE_CASE : int = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
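        # The merge rules split "apte" into "ap@@" + "te", while "adapt" and "act" remain whole tokens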
SCREAMING_SNAKE_CASE : Tuple = "adapt act apte"
SCREAMING_SNAKE_CASE : List[str] = ["adapt", "act", "ap@@", "te"]
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
SCREAMING_SNAKE_CASE : Tuple = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , UpperCAmelCase_ )
def _A ( self : Dict ):
SCREAMING_SNAKE_CASE : Union[str, Any] = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
assert tok("sam" ).input_ids == [1384]
SCREAMING_SNAKE_CASE : str = "I am a small frog."
SCREAMING_SNAKE_CASE : List[Any] = tok([src_text] , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ )["input_ids"]
SCREAMING_SNAKE_CASE : int = tok.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def _A ( self : Tuple ):
SCREAMING_SNAKE_CASE : List[str] = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
SCREAMING_SNAKE_CASE : Tuple = "I am a small frog ."
SCREAMING_SNAKE_CASE : Optional[int] = "."
SCREAMING_SNAKE_CASE : Dict = tok(UpperCAmelCase_ )["input_ids"]
SCREAMING_SNAKE_CASE : Optional[Any] = tok(UpperCAmelCase_ )["input_ids"]
assert encoded[-1] == encoded_dot[0]
| 62 | 0 |
'''simple docstring'''
def _UpperCAmelCase ( ) -> Dict:
_lowerCAmelCase : List[str] = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
_lowerCAmelCase : Dict = 6
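    # 6 Jan 1901 was a Sunday, so stepping day in 7-day increments keeps it on Sundays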
_lowerCAmelCase : List[Any] = 1
_lowerCAmelCase : int = 19_01
_lowerCAmelCase : str = 0
while year < 20_01:
day += 7
if (year % 4 == 0 and year % 1_00 != 0) or (year % 4_00 == 0):
if day > days_per_month[month - 1] and month != 2:
month += 1
_lowerCAmelCase : Any = day - days_per_month[month - 2]
elif day > 29 and month == 2:
month += 1
_lowerCAmelCase : Tuple = day - 29
else:
if day > days_per_month[month - 1]:
month += 1
_lowerCAmelCase : Union[str, Any] = day - days_per_month[month - 2]
if month > 12:
year += 1
_lowerCAmelCase : Union[str, Any] = 1
if year < 20_01 and day == 1:
sundays += 1
return sundays
if __name__ == "__main__":
print(solution())
| 384 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
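# (old prefix, new prefix) pairs applied to every key of the original state dict.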
logger = logging.get_logger(__name__)

rename_keys_prefix = [
("""bert.bert""", """visual_bert"""),
("""bert.cls""", """cls"""),
("""bert.classifier""", """cls"""),
("""token_type_embeddings_visual""", """visual_token_type_embeddings"""),
("""position_embeddings_visual""", """visual_position_embeddings"""),
("""projection""", """visual_projection"""),
]
ACCEPTABLE_CHECKPOINTS = [
"""nlvr2_coco_pre_trained.th""",
"""nlvr2_fine_tuned.th""",
"""nlvr2_pre_trained.th""",
"""vcr_coco_pre_train.th""",
"""vcr_fine_tune.th""",
"""vcr_pre_train.th""",
"""vqa_coco_pre_trained.th""",
"""vqa_fine_tuned.th""",
"""vqa_pre_trained.th""",
]
def load_state_dict(checkpoint_path):
    """simple docstring"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd


def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    """simple docstring"""
    new_d = OrderedDict()
    # Reconstructed from the reference conversion script: the converted model
    # expects an explicit position_ids buffer in its state dict.
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """simple docstring"""
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()

    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 62 | 0 |
"""simple docstring"""
def bin_to_octal(bin_string: str) -> str:
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 273 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
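# Lazy-import scaffolding: the heavy torch-backed modeling classes are only
# imported when first accessed (or when type checking).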
_import_structure = {
"""configuration_jukebox""": [
"""JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""JukeboxConfig""",
"""JukeboxPriorConfig""",
"""JukeboxVQVAEConfig""",
],
"""tokenization_jukebox""": ["""JukeboxTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
"""JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""JukeboxModel""",
"""JukeboxPreTrainedModel""",
"""JukeboxVQVAE""",
"""JukeboxPrior""",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 62 | 0 |
"""simple docstring"""
UpperCAmelCase__ = """
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
UpperCAmelCase__ = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
UpperCAmelCase__ = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 277 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
snake_case = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
snake_case = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
snake_case = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
    '''simple docstring'''

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        # Scoring is delegated to NLTK's corpus_gleu implementation.
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
| 62 | 0 |
from math import sqrt
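# Collection of elementary number-theory helpers: primality testing, a sieve,
# prime factorization, gcd/lcm, Goldbach pairs, divisors, fractions and Fibonacci.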
def is_prime(number: int) -> bool:
    '''simple docstring'''
    assert isinstance(number, int) and (number >= 0), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status


def sieve_er(n: int) -> list:
    '''simple docstring'''
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returns.
    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def get_prime_numbers(n: int) -> list:
    '''simple docstring'''
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def prime_factorization(number: int) -> list:
    '''simple docstring'''
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returns of the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def greatest_prime_factor(number: int) -> int:
    '''simple docstring'''
    assert isinstance(number, int) and (number >= 0), "'number' must been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def smallest_prime_factor(number: int) -> int:
    '''simple docstring'''
    assert isinstance(number, int) and (number >= 0), "'number' must been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def is_even(number: int) -> bool:
    '''simple docstring'''
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare must been from type bool"
    return number % 2 == 0


def is_odd(number: int) -> bool:
    '''simple docstring'''
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare must been from type bool"
    return number % 2 != 0


def goldbach(number: int) -> list:
    '''simple docstring'''
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"
    ans = []  # this list will returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)
    # run variable for while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans


def gcd(number_1: int, number_2: int) -> int:
    '''simple docstring'''
    assert (
        isinstance(number_1, int)
        and isinstance(number_2, int)
        and (number_1 >= 0)
        and (number_2 >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    while number_2 != 0:
        rest = number_1 % number_2
        number_1 = number_2
        number_2 = rest
    # precondition
    assert isinstance(number_1, int) and (
        number_1 >= 0
    ), "'number' must been from type int and positive"
    return number_1


def kg_v(number_1: int, number_2: int) -> int:
    '''simple docstring'''
    assert (
        isinstance(number_1, int)
        and isinstance(number_2, int)
        and (number_1 >= 1)
        and (number_2 >= 1)
    ), "'number1' and 'number2' must been positive integer."
    ans = 1  # actual answer that will be return.
    # for kgV (x,1)
    if number_1 > 1 and number_2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number_1)
        prime_fac_2 = prime_factorization(number_2)
    elif number_1 == 1 or number_2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number_1, number_2)
    count_1 = 0
    count_2 = 0
    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count_1 = prime_fac_1.count(n)
                count_2 = prime_fac_2.count(n)
                for _ in range(max(count_1, count_2)):
                    ans *= n
            else:
                count_1 = prime_fac_1.count(n)
                for _ in range(count_1):
                    ans *= n
            done.append(n)
    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count_2 = prime_fac_2.count(n)
            for _ in range(count_2):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans


def get_prime(n: int) -> int:
    '''simple docstring'''
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(ans), "'ans' must been a prime number and from type int"
    return ans


def get_primes_between(p_number_1: int, p_number_2: int) -> list:
    '''simple docstring'''
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returns.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans


def get_divisors(n: int) -> list:
    '''simple docstring'''
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans


def is_perfect_number(number: int) -> bool:
    '''simple docstring'''
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator: int, denominator: int) -> tuple:
    '''simple docstring'''
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n: int) -> int:
    '''simple docstring'''
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1  # this will be return.
    for factor in range(1, n + 1):
        ans *= factor
    return ans


def fib(n: int) -> int:
    '''simple docstring'''
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"
    tmp = 0
    fib_1 = 1
    ans = 1  # this will be return
    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp
    return ans
| 600 |
from __future__ import annotations
from typing import Any
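# A small dense-matrix class used to demonstrate the Sherman-Morrison rank-one
# update of an inverse matrix (see `sherman_morrison` below).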
class Matrix:
    '''simple docstring'''

    def __init__(self, row: int, column: int, default_value: float = 0):
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self):
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self):
        return str(self)

    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> float:
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix):
        # Applies (A + uv^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u),
        # where `self` already holds A^(-1).
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))


# Testing
if __name__ == "__main__":

    def test1():
        """simple docstring"""
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2():
        """simple docstring"""
        import doctest

        doctest.testmod()

    test2()
| 62 | 0 |
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]


class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dy = self.pos_x - self.goal_x
        dx = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost


class AStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)
            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)
            # each direction searches towards the other's current frontier node
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    path = bidir_astar.search()
    bd_end_time = time.time() - bd_start_time
print(F'BidirectionalAStar execution time = {bd_end_time:f} seconds')
| 462 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
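# The fast RoFormer tokenizer swaps in a Jieba-based pre-tokenizer at runtime;
# (de)serialization below resets it, since custom pre-tokenizers cannot be pickled.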
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_char_small": (
            "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_chinese_char_base": (
            "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_discriminator": (
            "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_generator": (
            "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "junnyu/roformer_chinese_small": 1536,
    "junnyu/roformer_chinese_base": 1536,
    "junnyu/roformer_chinese_char_small": 512,
    "junnyu/roformer_chinese_char_base": 512,
    "junnyu/roformer_small_discriminator": 128,
    "junnyu/roformer_small_generator": 128,
}

PRETRAINED_INIT_CONFIGURATION = {
    "junnyu/roformer_chinese_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_base": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_base": {"do_lower_case": True},
    "junnyu/roformer_small_discriminator": {"do_lower_case": True},
    "junnyu/roformer_small_generator": {"do_lower_case": True},
}


class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        # Fall back to the picklable Bert pre-tokenizer when serializing.
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(
        self,
        save_directory,
        legacy_format=None,
        filename_prefix=None,
        push_to_hub=False,
        **kwargs,
    ):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
| 62 | 0 |
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        s2s_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        s2s_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        s2s_model.load_state_dict(save_dict["model"])
        s2s_model = s2s_model.eval()
    else:
        s2s_tokenizer, s2s_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, s2s_tokenizer, s2s_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, s2s_tokenizer, s2s_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def lowerCAmelCase_ (_SCREAMING_SNAKE_CASE :Optional[Any] , _SCREAMING_SNAKE_CASE :Optional[int]="wiki40b" , _SCREAMING_SNAKE_CASE :List[Any]="dense" , _SCREAMING_SNAKE_CASE :List[str]=10 ) -> List[str]:
if source == "none":
a_ : int = (" <P> ".join(["" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
a_ : Any = query_qa_dense_index(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
a_ : Tuple = query_es_index(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index_name="english_wiki40b_snippets_100w" , n_results=_SCREAMING_SNAKE_CASE , )
a_ : Union[str, Any] = [
(res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
]
a_ : Tuple = "question: {} context: {}".format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda x: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda x: None),
    }
)
def answer_question(
    question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)
st.title('Long Form Question Answering with ELI5')
# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n'
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
'Answer the question',
'View the retrieved document only',
'View the most similar ELI5 question and answer',
'Show me everything, please!',
]
demo_options = st.sidebar.checkbox('Demo options')
if demo_options:
    action_st = st.sidebar.selectbox(
        '',
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        '',
        ['Show full text of passages', 'Show passage section titles'],
        index=0,
    )
    show_passages = show_type == 'Show full text of passages'
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox('Retrieval options')
if retrieval_options:
    retriever_info = '\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n '
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none'])
    index_type = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed'])
else:
    wiki_source = 'wiki40b'
    index_type = 'dense'
sampled = 'beam'
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox('Generation options')
if generate_options:
    generate_info = '\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n '
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled'])
    min_len = st.sidebar.slider(
        'Minimum generation length', min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        'Maximum generation length', min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
'<MY QUESTION>',
'How do people make chocolate?',
'Why do we get a fever when we are sick?',
'How can different animals perceive different colors?',
'What is natural language processing?',
'What\'s the best way to treat a sunburn?',
'What exactly are vitamins ?',
'How does nuclear energy provide electricity?',
'What\'s the difference between viruses and bacteria?',
'Why are flutes classified as woodwinds when most of them are made out of metal ?',
'Why do people like drinking coffee even though it tastes so bad?',
'What happens when wine ages? How does it make the wine taste better?',
'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?',
'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?',
'How does New Zealand have so many large bird predators?',
]
question_s = st.selectbox(
'What would you like to ask? ---- select <MY QUESTION> to enter a new query',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input('Enter your question here:', '')
else:
    question = question_s
if st.button('Show me!'):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method='dense', n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method='sparse', n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = '<P> ' + ' <P> '.join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == 'sampled'),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown('### The model generated answer is:')
        st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:')
for i, res in enumerate(support_list):
UpperCamelCase = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_'))
UpperCamelCase = res[1].strip()
if sec_titles == "":
UpperCamelCase = '[{}]({})'.format(res[0], wiki_url)
else:
UpperCamelCase = sec_titles.split(' & ')
UpperCamelCase = ' & '.join(
['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list]
)
st.markdown(
'{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'> <span style=\"font-family:arial; font-size:10pt;\">' + res[-1] + '</span>', unsafe_allow_html=True
)
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            '--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title'])
        )
        answers_st = [
            '{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != '']))
            for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score']))
            if i == 0 or sc > 2
        ]
        st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st)))
disclaimer = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n'
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 473 |
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
if not isinstance(lowercase , lowercase ):
raise TypeError("only integers accepted as input" )
else:
SCREAMING_SNAKE_CASE : Optional[int] = str(abs(lowercase ) )
SCREAMING_SNAKE_CASE : str = [list(lowercase ) for char in range(len(lowercase ) )]
for index in range(len(lowercase ) ):
num_transpositions[index].pop(lowercase )
return max(
int("".join(list(lowercase ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 62 | 0 |
'''simple docstring'''
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
lowercase : List[str] = logging.get_logger(__name__)
def __a ( A__ , A__ , A__ , A__ ) -> Union[str, Any]:
def constraint_to_multiple_of(A__ , A__ , A__=0 , A__=None ):
lowerCAmelCase = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
lowerCAmelCase = math.floor(val / multiple ) * multiple
if x < min_val:
lowerCAmelCase = math.ceil(val / multiple ) * multiple
return x
lowerCAmelCase = (output_size, output_size) if isinstance(A__ , A__ ) else output_size
lowerCAmelCase = get_image_size(A__ )
lowerCAmelCase = output_size
# determine new height and width
lowerCAmelCase = output_height / input_height
lowerCAmelCase = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
lowerCAmelCase = scale_width
else:
# fit height
lowerCAmelCase = scale_height
lowerCAmelCase = constraint_to_multiple_of(scale_height * input_height , multiple=A__ )
lowerCAmelCase = constraint_to_multiple_of(scale_width * input_width , multiple=A__ )
return (new_height, new_width)
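# A worked check of the resize rule above (a minimal sketch, assuming
# keep_aspect_ratio=True and ensure_multiple_of=32): a 480x640 input targeted at
# 384x384 has the smaller deviation on the height factor, so both sides scale by
# 384/480 = 0.8 and then snap to the nearest multiple of 32.
def _snap(val: float, multiple: int = 32) -> int:
    return round(val / multiple) * multiple

assert (_snap(0.8 * 480), _snap(0.8 * 640)) == (384, 512)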
class _lowerCAmelCase ( UpperCamelCase_ ):
"""simple docstring"""
lowerCAmelCase = ['''pixel_values''']
def __init__( self : Any , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Dict[str, int] = None , SCREAMING_SNAKE_CASE : PILImageResampling = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE : bool = False , SCREAMING_SNAKE_CASE : int = 1 , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Union[int, float] = 1 / 2_5_5 , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , **SCREAMING_SNAKE_CASE : str , ) -> List[Any]:
"""simple docstring"""
super().__init__(**UpperCAmelCase_ )
lowerCAmelCase = size if size is not None else {"height": 3_8_4, "width": 3_8_4}
lowerCAmelCase = get_size_dict(UpperCAmelCase_ )
lowerCAmelCase = do_resize
lowerCAmelCase = size
lowerCAmelCase = keep_aspect_ratio
lowerCAmelCase = ensure_multiple_of
lowerCAmelCase = resample
lowerCAmelCase = do_rescale
lowerCAmelCase = rescale_factor
lowerCAmelCase = do_normalize
lowerCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __A ( self : Tuple , SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : Dict[str, int] , SCREAMING_SNAKE_CASE : bool = False , SCREAMING_SNAKE_CASE : int = 1 , SCREAMING_SNAKE_CASE : PILImageResampling = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE : int , ) -> int:
"""simple docstring"""
lowerCAmelCase = get_size_dict(UpperCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(f"The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}" )
lowerCAmelCase = get_resize_output_image_size(
UpperCAmelCase_ , output_size=(size["height"], size["width"]) , keep_aspect_ratio=UpperCAmelCase_ , multiple=UpperCAmelCase_ , )
return resize(UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_ )
def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : Union[int, float] , SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE : Dict , ) -> Union[str, Any]:
"""simple docstring"""
return rescale(UpperCAmelCase_ , scale=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_ )
def __A ( self : Dict , SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : Union[float, List[float]] , SCREAMING_SNAKE_CASE : Union[float, List[float]] , SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE : List[str] , ) -> Dict:
"""simple docstring"""
return normalize(UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_ )
def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE : ImageInput , SCREAMING_SNAKE_CASE : bool = None , SCREAMING_SNAKE_CASE : int = None , SCREAMING_SNAKE_CASE : bool = None , SCREAMING_SNAKE_CASE : int = None , SCREAMING_SNAKE_CASE : PILImageResampling = None , SCREAMING_SNAKE_CASE : bool = None , SCREAMING_SNAKE_CASE : float = None , SCREAMING_SNAKE_CASE : bool = None , SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE : ChannelDimension = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE : Optional[Any] , ) -> Any:
"""simple docstring"""
lowerCAmelCase = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase = size if size is not None else self.size
lowerCAmelCase = get_size_dict(UpperCAmelCase_ )
lowerCAmelCase = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
lowerCAmelCase = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
lowerCAmelCase = resample if resample is not None else self.resample
lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase = image_std if image_std is not None else self.image_std
lowerCAmelCase = make_list_of_images(UpperCAmelCase_ )
if not valid_images(UpperCAmelCase_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
lowerCAmelCase = [to_numpy_array(UpperCAmelCase_ ) for image in images]
if do_resize:
lowerCAmelCase = [self.resize(image=UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_ ) for image in images]
if do_rescale:
lowerCAmelCase = [self.rescale(image=UpperCAmelCase_ , scale=UpperCAmelCase_ ) for image in images]
if do_normalize:
lowerCAmelCase = [self.normalize(image=UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ ) for image in images]
lowerCAmelCase = [to_channel_dimension_format(UpperCAmelCase_ , UpperCAmelCase_ ) for image in images]
lowerCAmelCase = {"pixel_values": images}
return BatchFeature(data=UpperCAmelCase_ , tensor_type=UpperCAmelCase_ )
def __A ( self : int , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Tuple] = None ) -> Dict:
"""simple docstring"""
lowerCAmelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCAmelCase_ ) != len(UpperCAmelCase_ ):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits" )
if is_torch_tensor(UpperCAmelCase_ ):
lowerCAmelCase = target_sizes.numpy()
lowerCAmelCase = []
for idx in range(len(UpperCAmelCase_ ) ):
lowerCAmelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=UpperCAmelCase_ )
lowerCAmelCase = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(UpperCAmelCase_ )
else:
lowerCAmelCase = logits.argmax(dim=1 )
lowerCAmelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
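# A hedged usage sketch of the post-processing above; the upstream method name
# `post_process_semantic_segmentation` and the (480, 640) target size are
# assumptions, since this file binds the method to an obfuscated alias:
#   maps = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[(480, 640)])
#   maps[0]  # a (480, 640) tensor of per-pixel class ids (argmax over the upsampled logits)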
| 649 |
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase_ : int = StableDiffusionControlNetImgaImgPipeline
UpperCamelCase_ : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
UpperCamelCase_ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCamelCase_ : Tuple = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'''control_image'''} )
UpperCamelCase_ : Any = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _A ( self : List[str] ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : int = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTextModel(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
SCREAMING_SNAKE_CASE : str = {
"unet": unet,
"controlnet": controlnet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def _A ( self : str , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int=0 ):
if str(UpperCAmelCase_ ).startswith("mps" ):
SCREAMING_SNAKE_CASE : Any = torch.manual_seed(UpperCAmelCase_ )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = 2
SCREAMING_SNAKE_CASE : Union[str, Any] = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=UpperCAmelCase_ , device=torch.device(UpperCAmelCase_ ) , )
SCREAMING_SNAKE_CASE : Tuple = floats_tensor(control_image.shape , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE : str = Image.fromarray(np.uinta(UpperCAmelCase_ ) ).convert("RGB" ).resize((64, 64) )
SCREAMING_SNAKE_CASE : List[str] = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
"image": image,
"control_image": control_image,
}
return inputs
def _A ( self : int ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def _A ( self : str ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def _A ( self : Union[str, Any] ):
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class SCREAMING_SNAKE_CASE ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase_ : List[str] = StableDiffusionControlNetImgaImgPipeline
UpperCamelCase_ : str = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
UpperCamelCase_ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCamelCase_ : Dict = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def _A ( self : Optional[Any] ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
torch.manual_seed(0 )
def init_weights(UpperCAmelCase_ : List[Any] ):
if isinstance(UpperCAmelCase_ , torch.nn.Convad ):
                torch.nn.init.normal_(UpperCAmelCase_.weight )
                UpperCAmelCase_.bias.data.fill_(1.0 )
SCREAMING_SNAKE_CASE : List[str] = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(UpperCAmelCase_ )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(UpperCAmelCase_ )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
SCREAMING_SNAKE_CASE : Any = CLIPTextModel(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
SCREAMING_SNAKE_CASE : Tuple = MultiControlNetModel([controlneta, controlneta] )
SCREAMING_SNAKE_CASE : Optional[int] = {
"unet": unet,
"controlnet": controlnet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def _A ( self : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any=0 ):
if str(UpperCAmelCase_ ).startswith("mps" ):
SCREAMING_SNAKE_CASE : Dict = torch.manual_seed(UpperCAmelCase_ )
else:
SCREAMING_SNAKE_CASE : str = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Any = 2
SCREAMING_SNAKE_CASE : Tuple = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=UpperCAmelCase_ , device=torch.device(UpperCAmelCase_ ) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=UpperCAmelCase_ , device=torch.device(UpperCAmelCase_ ) , ),
]
SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor(control_image[0].shape , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = Image.fromarray(np.uinta(UpperCAmelCase_ ) ).convert("RGB" ).resize((64, 64) )
SCREAMING_SNAKE_CASE : Optional[Any] = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
"image": image,
"control_image": control_image,
}
return inputs
def _A ( self : Tuple ):
SCREAMING_SNAKE_CASE : Any = self.get_dummy_components()
SCREAMING_SNAKE_CASE : str = self.pipeline_class(**UpperCAmelCase_ )
pipe.to(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : str = 10.0
SCREAMING_SNAKE_CASE : Any = 4
SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = steps
SCREAMING_SNAKE_CASE : int = scale
SCREAMING_SNAKE_CASE : Optional[int] = pipe(**UpperCAmelCase_ )[0]
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_inputs(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = steps
SCREAMING_SNAKE_CASE : Any = scale
SCREAMING_SNAKE_CASE : List[str] = pipe(**UpperCAmelCase_ , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = steps
SCREAMING_SNAKE_CASE : int = scale
SCREAMING_SNAKE_CASE : List[Any] = pipe(**UpperCAmelCase_ , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_inputs(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : str = steps
SCREAMING_SNAKE_CASE : Dict = scale
SCREAMING_SNAKE_CASE : Dict = pipe(**UpperCAmelCase_ , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
def _A ( self : Union[str, Any] ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def _A ( self : str ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def _A ( self : List[Any] ):
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
def _A ( self : Any ):
SCREAMING_SNAKE_CASE : Dict = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Dict = self.pipeline_class(**UpperCAmelCase_ )
pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(UpperCAmelCase_ )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _A ( self : Optional[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE : str = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny" )
SCREAMING_SNAKE_CASE : Union[str, Any] = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , safety_checker=UpperCAmelCase_ , controlnet=UpperCAmelCase_ )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : str = torch.Generator(device="cpu" ).manual_seed(0 )
SCREAMING_SNAKE_CASE : str = "evil space-punk bird"
SCREAMING_SNAKE_CASE : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ).resize((512, 512) )
SCREAMING_SNAKE_CASE : Optional[int] = load_image(
"https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png" ).resize((512, 512) )
SCREAMING_SNAKE_CASE : str = pipe(
UpperCAmelCase_ , UpperCAmelCase_ , control_image=UpperCAmelCase_ , generator=UpperCAmelCase_ , output_type="np" , num_inference_steps=50 , strength=0.6 , )
SCREAMING_SNAKE_CASE : int = output.images[0]
assert image.shape == (512, 512, 3)
SCREAMING_SNAKE_CASE : Dict = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy" )
assert np.abs(expected_image - image ).max() < 9E-2
| 62 | 0 |
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class __lowerCAmelCase ( a ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ['''image_processor''', '''tokenizer''']
_SCREAMING_SNAKE_CASE = '''AutoImageProcessor'''
_SCREAMING_SNAKE_CASE = '''AutoTokenizer'''
def __init__( self : List[str] , _lowerCAmelCase : Union[str, Any]=None , _lowerCAmelCase : Optional[int]=None , **_lowerCAmelCase : int ) -> Any:
"""simple docstring"""
snake_case_ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , UpperCAmelCase_ , )
snake_case_ = kwargs.pop("feature_extractor" )
snake_case_ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = self.image_processor
snake_case_ = False
def __call__( self : str , *_lowerCAmelCase : Tuple , **_lowerCAmelCase : str ) -> Optional[Any]:
"""simple docstring"""
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*UpperCAmelCase_ , **UpperCAmelCase_ )
snake_case_ = kwargs.pop("images" , UpperCAmelCase_ )
snake_case_ = kwargs.pop("text" , UpperCAmelCase_ )
if len(UpperCAmelCase_ ) > 0:
snake_case_ = args[0]
snake_case_ = args[1:]
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process." )
if images is not None:
snake_case_ = self.image_processor(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_ )
if text is not None:
snake_case_ = self.tokenizer(UpperCAmelCase_ , **UpperCAmelCase_ )
if text is None:
return inputs
elif images is None:
return encodings
else:
snake_case_ = encodings["input_ids"]
return inputs
def lowerCAmelCase__ ( self : Optional[Any] , *_lowerCAmelCase : List[Any] , **_lowerCAmelCase : str ) -> List[str]:
"""simple docstring"""
return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
def lowerCAmelCase__ ( self : Dict , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : List[Any] ) -> Tuple:
"""simple docstring"""
return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
@contextmanager
def lowerCAmelCase__ ( self : str ) -> List[Any]:
"""simple docstring"""
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your images inputs, or in a separate call." )
snake_case_ = True
snake_case_ = self.tokenizer
yield
snake_case_ = self.image_processor
snake_case_ = False
def lowerCAmelCase__ ( self : Union[str, Any] , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any]=False , _lowerCAmelCase : Tuple=None ) -> List[Any]:
"""simple docstring"""
if added_vocab is None:
snake_case_ = self.tokenizer.get_added_vocab()
snake_case_ = {}
while tokens:
snake_case_ = re.search(R"<s_(.*?)>" , UpperCAmelCase_ , re.IGNORECASE )
if start_token is None:
break
snake_case_ = start_token.group(1 )
snake_case_ = re.search(RF'''</s_{key}>''' , UpperCAmelCase_ , re.IGNORECASE )
snake_case_ = start_token.group()
if end_token is None:
snake_case_ = tokens.replace(UpperCAmelCase_ , "" )
else:
snake_case_ = end_token.group()
snake_case_ = re.escape(UpperCAmelCase_ )
snake_case_ = re.escape(UpperCAmelCase_ )
snake_case_ = re.search(F'''{start_token_escaped}(.*?){end_token_escaped}''' , UpperCAmelCase_ , re.IGNORECASE )
if content is not None:
snake_case_ = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
snake_case_ = self.tokenajson(UpperCAmelCase_ , is_inner_value=UpperCAmelCase_ , added_vocab=UpperCAmelCase_ )
if value:
if len(UpperCAmelCase_ ) == 1:
snake_case_ = value[0]
snake_case_ = value
else: # leaf nodes
snake_case_ = []
for leaf in content.split(R"<sep/>" ):
snake_case_ = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
snake_case_ = leaf[1:-2] # for categorical special tokens
output[key].append(UpperCAmelCase_ )
if len(output[key] ) == 1:
snake_case_ = output[key][0]
snake_case_ = tokens[tokens.find(UpperCAmelCase_ ) + len(UpperCAmelCase_ ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=UpperCAmelCase_ , added_vocab=UpperCAmelCase_ )
if len(UpperCAmelCase_ ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def lowerCAmelCase__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , UpperCAmelCase_ , )
return self.image_processor_class
@property
def lowerCAmelCase__ ( self : Optional[int] ) -> str:
"""simple docstring"""
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , UpperCAmelCase_ , )
return self.image_processor
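# A minimal, self-contained sketch of the token-to-JSON idea implemented above,
# restricted to flat, non-nested fields; the full method additionally handles
# nesting and <sep/>-separated lists. The keys and values below are illustrative.
import re

def flat_tokens_to_json(tokens: str) -> dict:
    return {key: value.strip() for key, value in re.findall(r"<s_(.*?)>(.*?)</s_\1>", tokens)}

assert flat_tokens_to_json("<s_total>12.30</s_total><s_date>2021-06-01</s_date>") == {
    "total": "12.30",
    "date": "2021-06-01",
}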
| 283 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
snake_case = logging.get_logger(__name__)
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
SCREAMING_SNAKE_CASE : List[Any] = [144, 192, 240]
SCREAMING_SNAKE_CASE : Tuple = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
SCREAMING_SNAKE_CASE : List[str] = [96, 120, 144]
SCREAMING_SNAKE_CASE : Dict = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
SCREAMING_SNAKE_CASE : List[str] = [64, 80, 96]
SCREAMING_SNAKE_CASE : List[str] = [16, 16, 24, 48, 64, 80, 320]
SCREAMING_SNAKE_CASE : int = 0.05
SCREAMING_SNAKE_CASE : int = 2.0
if mobilevit_name.startswith("deeplabv3_" ):
SCREAMING_SNAKE_CASE : str = 512
SCREAMING_SNAKE_CASE : List[str] = 16
SCREAMING_SNAKE_CASE : Union[str, Any] = 21
SCREAMING_SNAKE_CASE : Dict = "pascal-voc-id2label.json"
else:
SCREAMING_SNAKE_CASE : Optional[Any] = 1000
SCREAMING_SNAKE_CASE : Optional[Any] = "imagenet-1k-id2label.json"
SCREAMING_SNAKE_CASE : Any = "huggingface/label-files"
SCREAMING_SNAKE_CASE : Tuple = json.load(open(hf_hub_download(lowercase , lowercase , repo_type="dataset" ) , "r" ) )
SCREAMING_SNAKE_CASE : List[str] = {int(lowercase ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : Optional[Any] = idalabel
SCREAMING_SNAKE_CASE : str = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase__ ( lowercase , lowercase=False ):
"""simple docstring"""
for i in range(1 , 6 ):
if F'''layer_{i}.''' in name:
SCREAMING_SNAKE_CASE : Tuple = name.replace(F'''layer_{i}.''' , F'''encoder.layer.{i - 1}.''' )
if "conv_1." in name:
SCREAMING_SNAKE_CASE : Dict = name.replace("conv_1." , "conv_stem." )
if ".block." in name:
SCREAMING_SNAKE_CASE : List[str] = name.replace(".block." , "." )
if "exp_1x1" in name:
SCREAMING_SNAKE_CASE : str = name.replace("exp_1x1" , "expand_1x1" )
if "red_1x1" in name:
SCREAMING_SNAKE_CASE : Optional[int] = name.replace("red_1x1" , "reduce_1x1" )
if ".local_rep.conv_3x3." in name:
SCREAMING_SNAKE_CASE : Dict = name.replace(".local_rep.conv_3x3." , ".conv_kxk." )
if ".local_rep.conv_1x1." in name:
SCREAMING_SNAKE_CASE : int = name.replace(".local_rep.conv_1x1." , ".conv_1x1." )
if ".norm." in name:
SCREAMING_SNAKE_CASE : Optional[int] = name.replace(".norm." , ".normalization." )
if ".conv." in name:
SCREAMING_SNAKE_CASE : Optional[int] = name.replace(".conv." , ".convolution." )
if ".conv_proj." in name:
SCREAMING_SNAKE_CASE : Dict = name.replace(".conv_proj." , ".conv_projection." )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if F'''.{i}.{j}.''' in name:
SCREAMING_SNAKE_CASE : Optional[Any] = name.replace(F'''.{i}.{j}.''' , F'''.{i}.layer.{j}.''' )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if F'''.{i}.{j}.''' in name:
SCREAMING_SNAKE_CASE : Optional[int] = name.replace(F'''.{i}.{j}.''' , F'''.{i}.''' )
if "expand_1x1" in name:
SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace("expand_1x1" , "downsampling_layer.expand_1x1" )
if "conv_3x3" in name:
SCREAMING_SNAKE_CASE : str = name.replace("conv_3x3" , "downsampling_layer.conv_3x3" )
if "reduce_1x1" in name:
SCREAMING_SNAKE_CASE : List[Any] = name.replace("reduce_1x1" , "downsampling_layer.reduce_1x1" )
for i in range(2 , 5 ):
if F'''.global_rep.{i}.weight''' in name:
SCREAMING_SNAKE_CASE : Optional[int] = name.replace(F'''.global_rep.{i}.weight''' , ".layernorm.weight" )
if F'''.global_rep.{i}.bias''' in name:
SCREAMING_SNAKE_CASE : str = name.replace(F'''.global_rep.{i}.bias''' , ".layernorm.bias" )
if ".global_rep." in name:
SCREAMING_SNAKE_CASE : Dict = name.replace(".global_rep." , ".transformer." )
if ".pre_norm_mha.0." in name:
SCREAMING_SNAKE_CASE : Tuple = name.replace(".pre_norm_mha.0." , ".layernorm_before." )
if ".pre_norm_mha.1.out_proj." in name:
SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace(".pre_norm_mha.1.out_proj." , ".attention.output.dense." )
if ".pre_norm_ffn.0." in name:
SCREAMING_SNAKE_CASE : Optional[Any] = name.replace(".pre_norm_ffn.0." , ".layernorm_after." )
if ".pre_norm_ffn.1." in name:
SCREAMING_SNAKE_CASE : Tuple = name.replace(".pre_norm_ffn.1." , ".intermediate.dense." )
if ".pre_norm_ffn.4." in name:
SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace(".pre_norm_ffn.4." , ".output.dense." )
if ".transformer." in name:
SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace(".transformer." , ".transformer.layer." )
if ".aspp_layer." in name:
SCREAMING_SNAKE_CASE : int = name.replace(".aspp_layer." , "." )
if ".aspp_pool." in name:
SCREAMING_SNAKE_CASE : Tuple = name.replace(".aspp_pool." , "." )
if "seg_head." in name:
SCREAMING_SNAKE_CASE : Optional[int] = name.replace("seg_head." , "segmentation_head." )
if "segmentation_head.classifier.classifier." in name:
SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("segmentation_head.classifier.classifier." , "segmentation_head.classifier." )
if "classifier.fc." in name:
SCREAMING_SNAKE_CASE : List[Any] = name.replace("classifier.fc." , "classifier." )
elif (not base_model) and ("segmentation_head." not in name):
SCREAMING_SNAKE_CASE : List[Any] = "mobilevit." + name
return name
def lowerCamelCase__ ( lowercase , lowercase , lowercase=False ):
"""simple docstring"""
if base_model:
SCREAMING_SNAKE_CASE : Optional[int] = ""
else:
SCREAMING_SNAKE_CASE : Any = "mobilevit."
for key in orig_state_dict.copy().keys():
SCREAMING_SNAKE_CASE : Union[str, Any] = orig_state_dict.pop(lowercase )
if key[:8] == "encoder.":
SCREAMING_SNAKE_CASE : int = key[8:]
if "qkv" in key:
SCREAMING_SNAKE_CASE : Optional[int] = key.split("." )
SCREAMING_SNAKE_CASE : Any = int(key_split[0][6:] ) - 1
SCREAMING_SNAKE_CASE : List[Any] = int(key_split[3] )
SCREAMING_SNAKE_CASE : List[Any] = model.get_submodule(F'''{model_prefix}encoder.layer.{layer_num}''' )
SCREAMING_SNAKE_CASE : int = layer.transformer.layer[transformer_num].attention.attention.all_head_size
SCREAMING_SNAKE_CASE : Union[str, Any] = (
F'''{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.'''
)
if "weight" in key:
SCREAMING_SNAKE_CASE : Optional[int] = val[:dim, :]
SCREAMING_SNAKE_CASE : Tuple = val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE : Dict = val[-dim:, :]
else:
SCREAMING_SNAKE_CASE : str = val[:dim]
SCREAMING_SNAKE_CASE : Union[str, Any] = val[dim : dim * 2]
SCREAMING_SNAKE_CASE : Union[str, Any] = val[-dim:]
else:
SCREAMING_SNAKE_CASE : List[Any] = val
return orig_state_dict
def lowerCamelCase__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = "http://images.cocodataset.org/val2017/000000039769.jpg"
SCREAMING_SNAKE_CASE : Union[str, Any] = Image.open(requests.get(lowercase , stream=lowercase ).raw )
return im
@torch.no_grad()
def lowerCamelCase__ ( lowercase , lowercase , lowercase , lowercase=False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = get_mobilevit_config(lowercase )
# load original state_dict
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.load(lowercase , map_location="cpu" )
# load 🤗 model
if mobilevit_name.startswith("deeplabv3_" ):
SCREAMING_SNAKE_CASE : List[str] = MobileViTForSemanticSegmentation(lowercase ).eval()
else:
SCREAMING_SNAKE_CASE : str = MobileViTForImageClassification(lowercase ).eval()
SCREAMING_SNAKE_CASE : Any = convert_state_dict(lowercase , lowercase )
model.load_state_dict(lowercase )
# Check outputs on an image, prepared by MobileViTImageProcessor
SCREAMING_SNAKE_CASE : List[Any] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor(images=prepare_img() , return_tensors="pt" )
SCREAMING_SNAKE_CASE : List[Any] = model(**lowercase )
SCREAMING_SNAKE_CASE : str = outputs.logits
if mobilevit_name.startswith("deeplabv3_" ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(
[
[[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
[[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
[[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
SCREAMING_SNAKE_CASE : Tuple = torch.tensor(
[
[[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
[[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
[[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(
[
[[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
[[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
[[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
] )
else:
raise ValueError(F'''Unknown mobilevit_name: {mobilevit_name}''' )
assert torch.allclose(logits[0, :3, :3, :3] , lowercase , atol=1E-4 )
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([-0.9866, 0.2392, -1.1241] )
elif mobilevit_name == "mobilevit_xs":
SCREAMING_SNAKE_CASE : Dict = torch.tensor([-2.4761, -0.9399, -1.9587] )
elif mobilevit_name == "mobilevit_xxs":
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([-1.9364, -1.2327, -0.4653] )
else:
raise ValueError(F'''Unknown mobilevit_name: {mobilevit_name}''' )
assert torch.allclose(logits[0, :3] , lowercase , atol=1E-4 )
Path(lowercase ).mkdir(exist_ok=lowercase )
print(F'''Saving model {mobilevit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowercase )
if push_to_hub:
SCREAMING_SNAKE_CASE : List[str] = {
"mobilevit_s": "mobilevit-small",
"mobilevit_xs": "mobilevit-x-small",
"mobilevit_xxs": "mobilevit-xx-small",
"deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
"deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
"deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
}
print("Pushing to the hub..." )
SCREAMING_SNAKE_CASE : int = model_mapping[mobilevit_name]
image_processor.push_to_hub(lowercase , organization="apple" )
model.push_to_hub(lowercase , organization="apple" )
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--mobilevit_name""",
default="""mobilevit_s""",
type=str,
help=(
"""Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"""
""" 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."""
),
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
snake_case = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
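# A hedged invocation sketch for the conversion script above; the script file
# name and all paths below are illustrative assumptions, not taken from this file:
#   python convert_mobilevit_original_to_pytorch.py \
#       --mobilevit_name mobilevit_s \
#       --checkpoint_path ./mobilevit_s.pt \
#       --pytorch_dump_folder_path ./mobilevit-small \
#       --push_to_hub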
| 62 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A_ : Tuple = {
"configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
"tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[str] = [
"CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvBertForMaskedLM",
"ConvBertForMultipleChoice",
"ConvBertForQuestionAnswering",
"ConvBertForSequenceClassification",
"ConvBertForTokenClassification",
"ConvBertLayer",
"ConvBertModel",
"ConvBertPreTrainedModel",
"load_tf_weights_in_convbert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : str = [
"TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFConvBertForMaskedLM",
"TFConvBertForMultipleChoice",
"TFConvBertForQuestionAnswering",
"TFConvBertForSequenceClassification",
"TFConvBertForTokenClassification",
"TFConvBertLayer",
"TFConvBertModel",
"TFConvBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
A_ : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 196 |
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
snake_case = datasets.load_iris()
snake_case = np.array(data["""data"""])
snake_case = np.array(data["""target"""])
snake_case = data["""target_names"""]
snake_case , snake_case , snake_case , snake_case = train_test_split(X, y)
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
return np.linalg.norm(np.array(lowercase ) - np.array(lowercase ) )
def lowerCamelCase__ ( lowercase , lowercase , lowercase , lowercase , lowercase=5 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = zip(lowercase , lowercase )
# List of distances of all points from the point to be classified
SCREAMING_SNAKE_CASE : Optional[int] = []
for data_point in data:
SCREAMING_SNAKE_CASE : Tuple = euclidean_distance(data_point[0] , lowercase )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
SCREAMING_SNAKE_CASE : List[Any] = [i[1] for i in sorted(lowercase )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
SCREAMING_SNAKE_CASE : List[Any] = Counter(lowercase ).most_common(1 )[0][0]
return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
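# A vectorized sketch of the same k-NN rule, reusing np, Counter, classes, and
# the train split defined at the top of this file; the function name is
# illustrative. One broadcasted norm call replaces the explicit distance loop.
def knn_predict(train_x, train_y, query, k=5):
    dists = np.linalg.norm(np.asarray(train_x) - np.asarray(query), axis=1)
    nearest = np.argsort(dists)[:k]
    return classes[Counter(np.asarray(train_y)[nearest]).most_common(1)[0][0]]

print(knn_predict(X_train, y_train, [4.4, 3.1, 1.3, 1.4]))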
| 62 | 0 |
import argparse
import struct
import unittest
class lowerCamelCase :
'''simple docstring'''
def __init__( self , lowerCAmelCase ):
UpperCAmelCase_ = data
# Initialize hash values
UpperCAmelCase_ = [
0x6A09_E667,
0xBB67_AE85,
0x3C6E_F372,
0xA54F_F53A,
0x510E_527F,
0x9B05_688C,
0x1F83_D9AB,
0x5BE0_CD19,
]
# Initialize round constants
UpperCAmelCase_ = [
0x428A_2F98,
0x7137_4491,
0xB5C0_FBCF,
0xE9B5_DBA5,
0x3956_C25B,
0x59F1_11F1,
0x923F_82A4,
0xAB1C_5ED5,
0xD807_AA98,
0x1283_5B01,
0x2431_85BE,
0x550C_7DC3,
0x72BE_5D74,
0x80DE_B1FE,
0x9BDC_06A7,
0xC19B_F174,
0xE49B_69C1,
0xEFBE_4786,
0x0FC1_9DC6,
0x240C_A1CC,
0x2DE9_2C6F,
0x4A74_84AA,
0x5CB0_A9DC,
0x76F9_88DA,
0x983E_5152,
0xA831_C66D,
0xB003_27C8,
0xBF59_7FC7,
0xC6E0_0BF3,
0xD5A7_9147,
0x06CA_6351,
0x1429_2967,
0x27B7_0A85,
0x2E1B_2138,
0x4D2C_6DFC,
0x5338_0D13,
0x650A_7354,
0x766A_0ABB,
0x81C2_C92E,
0x9272_2C85,
0xA2BF_E8A1,
0xA81A_664B,
0xC24B_8B70,
0xC76C_51A3,
0xD192_E819,
0xD699_0624,
0xF40E_3585,
0x106A_A070,
0x19A4_C116,
0x1E37_6C08,
0x2748_774C,
0x34B0_BCB5,
0x391C_0CB3,
0x4ED8_AA4A,
0x5B9C_CA4F,
0x682E_6FF3,
0x748F_82EE,
0x78A5_636F,
0x84C8_7814,
0x8CC7_0208,
0x90BE_FFFA,
0xA450_6CEB,
0xBEF9_A3F7,
0xC671_78F2,
]
UpperCAmelCase_ = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def A__ ( lowerCAmelCase ):
UpperCAmelCase_ = b"\x80" + (b"\x00" * (63 - (len(UpperCAmelCase_ ) + 8) % 64))
UpperCAmelCase_ = struct.pack(">Q" , (len(UpperCAmelCase_ ) * 8) )
return data + padding + big_endian_integer
def A__ ( self ):
# Convert into blocks of 64 bytes
UpperCAmelCase_ = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
UpperCAmelCase_ = list(struct.unpack(">16L" , UpperCAmelCase_ ) )
# add 48 0-ed integers
words += [0] * 48
UpperCAmelCase_ = self.hashes
for index in range(0 , 64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
UpperCAmelCase_ = (
self.ror(words[index - 15] , 7 )
^ self.ror(words[index - 15] , 18 )
^ (words[index - 15] >> 3)
)
UpperCAmelCase_ = (
self.ror(words[index - 2] , 17 )
^ self.ror(words[index - 2] , 19 )
^ (words[index - 2] >> 10)
)
UpperCAmelCase_ = (
words[index - 16] + sa + words[index - 7] + sa
) % 0x1_0000_0000
# Compression
UpperCAmelCase_ = self.ror(UpperCAmelCase_ , 6 ) ^ self.ror(UpperCAmelCase_ , 11 ) ^ self.ror(UpperCAmelCase_ , 25 )
UpperCAmelCase_ = (e & f) ^ ((~e & 0xFFFF_FFFF) & g)
UpperCAmelCase_ = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x1_0000_0000
UpperCAmelCase_ = self.ror(UpperCAmelCase_ , 2 ) ^ self.ror(UpperCAmelCase_ , 13 ) ^ self.ror(UpperCAmelCase_ , 22 )
UpperCAmelCase_ = (a & b) ^ (a & c) ^ (b & c)
UpperCAmelCase_ = (sa + maj) % 0x1_0000_0000
UpperCAmelCase_ = (
g,
f,
e,
((d + tempa) % 0x1_0000_0000),
c,
b,
a,
((tempa + tempa) % 0x1_0000_0000),
)
UpperCAmelCase_ = [a, b, c, d, e, f, g, h]
# Modify final values
UpperCAmelCase_ = [
((element + mutated_hash_values[index]) % 0x1_0000_0000)
for index, element in enumerate(self.hashes )
]
UpperCAmelCase_ = "".join([hex(UpperCAmelCase_ )[2:].zfill(8 ) for value in self.hashes] )
def A__ ( self , lowerCAmelCase , lowerCAmelCase ):
return 0xFFFF_FFFF & (value << (32 - rotations)) | (value >> rotations)
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self ):
import hashlib
UpperCAmelCase_ = bytes("Test String" , "utf-8" )
self.assertEqual(SHAaaa(UpperCAmelCase_ ).hash , hashlib.shaaaa(UpperCAmelCase_ ).hexdigest() )
def snake_case__ ( ) -> Optional[int]:
import doctest
doctest.testmod()
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
"-s" , "--string" , dest="input_string" , default="Hello World!! Welcome to Cryptography" , help="Hash the string" , )
parser.add_argument(
"-f" , "--file" , dest="input_file" , help="Hash contents of a file" )
UpperCAmelCase_ = parser.parse_args()
UpperCAmelCase_ = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , "rb" ) as f:
UpperCAmelCase_ = f.read()
else:
UpperCAmelCase_ = bytes(__SCREAMING_SNAKE_CASE , "utf-8" )
print(SHAaaa(__SCREAMING_SNAKE_CASE ).hash )
if __name__ == "__main__":
main()
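# A standalone cross-check for the implementation above (the same comparison the
# unit test performs): the standard-library digest is the value the pure-Python
# class should reproduce for the same input bytes.
#   import hashlib
#   hashlib.sha256(b"Test String").hexdigest()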
| 579 |
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""microsoft/xprophetnet-large-wiki100-cased""": (
"""https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"""
),
}
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Tuple = '''xlm-prophetnet'''
UpperCamelCase_ : Tuple = ['''past_key_values''']
UpperCamelCase_ : int = {
'''num_attention_heads''': '''num_encoder_attention_heads''',
}
def __init__( self : Dict , UpperCAmelCase_ : Optional[float] = 0.1 , UpperCAmelCase_ : Optional[Union[str, Callable]] = "gelu" , UpperCAmelCase_ : Optional[int] = 3_0522 , UpperCAmelCase_ : Optional[int] = 1024 , UpperCAmelCase_ : Optional[int] = 4096 , UpperCAmelCase_ : Optional[int] = 12 , UpperCAmelCase_ : Optional[int] = 16 , UpperCAmelCase_ : Optional[int] = 4096 , UpperCAmelCase_ : Optional[int] = 12 , UpperCAmelCase_ : Optional[int] = 16 , UpperCAmelCase_ : Optional[float] = 0.1 , UpperCAmelCase_ : Optional[float] = 0.1 , UpperCAmelCase_ : Optional[int] = 512 , UpperCAmelCase_ : Optional[float] = 0.02 , UpperCAmelCase_ : Optional[bool] = True , UpperCAmelCase_ : Optional[bool] = True , UpperCAmelCase_ : Optional[int] = 0 , UpperCAmelCase_ : Optional[int] = 2 , UpperCAmelCase_ : Optional[int] = 32 , UpperCAmelCase_ : Optional[int] = 128 , UpperCAmelCase_ : Optional[bool] = False , UpperCAmelCase_ : Optional[float] = 0.0 , UpperCAmelCase_ : Optional[bool] = True , UpperCAmelCase_ : Optional[int] = 0 , UpperCAmelCase_ : Optional[int] = 1 , UpperCAmelCase_ : Optional[int] = 2 , **UpperCAmelCase_ : Dict , ):
SCREAMING_SNAKE_CASE : Any = vocab_size
SCREAMING_SNAKE_CASE : str = hidden_size
SCREAMING_SNAKE_CASE : Tuple = encoder_ffn_dim
SCREAMING_SNAKE_CASE : List[Any] = num_encoder_layers
SCREAMING_SNAKE_CASE : Any = num_encoder_attention_heads
SCREAMING_SNAKE_CASE : Dict = decoder_ffn_dim
SCREAMING_SNAKE_CASE : List[str] = num_decoder_layers
SCREAMING_SNAKE_CASE : List[Any] = num_decoder_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Dict = init_std # Normal(0, this parameter)
SCREAMING_SNAKE_CASE : Union[str, Any] = activation_function
# parameters for xlmprophetnet
SCREAMING_SNAKE_CASE : Dict = ngram
SCREAMING_SNAKE_CASE : Any = num_buckets
SCREAMING_SNAKE_CASE : str = relative_max_distance
SCREAMING_SNAKE_CASE : str = disable_ngram_loss
SCREAMING_SNAKE_CASE : Dict = eps
# 3 Types of Dropout
SCREAMING_SNAKE_CASE : Union[str, Any] = attention_dropout
SCREAMING_SNAKE_CASE : Optional[int] = activation_dropout
SCREAMING_SNAKE_CASE : List[Any] = dropout
SCREAMING_SNAKE_CASE : int = use_cache
super().__init__(
pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , is_encoder_decoder=UpperCAmelCase_ , add_cross_attention=UpperCAmelCase_ , decoder_start_token_id=UpperCAmelCase_ , **UpperCAmelCase_ , )
@property
def _A ( self : int ):
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def _A ( self : str , UpperCAmelCase_ : Optional[Any] ):
raise NotImplementedError(
"This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
" `num_decoder_layers`." )
| 62 | 0 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class a ( lowercase__ ):
"""simple docstring"""
a : Tuple = 'char'
a : Tuple = 'bpe'
a : Optional[Any] = 'wp'
a : Tuple = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class a ( lowercase__ ):
"""simple docstring"""
a : Tuple = ['image_processor', 'char_tokenizer']
a : Optional[Any] = 'ViTImageProcessor'
a : int = 'MgpstrTokenizer'
def __init__( self : int , __lowercase : List[str]=None , __lowercase : List[str]=None , **__lowercase : List[Any] ) -> Any:
__UpperCAmelCase : int = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __lowercase , )
__UpperCAmelCase : Union[str, Any] = kwargs.pop("""feature_extractor""" )
__UpperCAmelCase : Optional[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
__UpperCAmelCase : str = tokenizer
__UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained("""gpt2""" )
__UpperCAmelCase : Any = AutoTokenizer.from_pretrained("""bert-base-uncased""" )
super().__init__(__lowercase , __lowercase )
def __call__( self : List[Any] , __lowercase : Optional[Any]=None , __lowercase : str=None , __lowercase : List[str]=None , **__lowercase : Tuple ) -> Union[str, Any]:
if images is None and text is None:
raise ValueError("""You need to specify either an `images` or `text` input to process.""" )
if images is not None:
__UpperCAmelCase : Union[str, Any] = self.image_processor(__lowercase , return_tensors=__lowercase , **__lowercase )
if text is not None:
__UpperCAmelCase : Any = self.char_tokenizer(__lowercase , return_tensors=__lowercase , **__lowercase )
if text is None:
return inputs
elif images is None:
return encodings
else:
__UpperCAmelCase : Tuple = encodings["""input_ids"""]
return inputs
def UpperCAmelCase ( self : Tuple , __lowercase : List[str] ) -> int:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = sequences
__UpperCAmelCase : str = char_preds.size(0 )
__UpperCAmelCase , __UpperCAmelCase : List[str] = self._decode_helper(__lowercase , """char""" )
__UpperCAmelCase , __UpperCAmelCase : List[Any] = self._decode_helper(__lowercase , """bpe""" )
__UpperCAmelCase , __UpperCAmelCase : int = self._decode_helper(__lowercase , """wp""" )
__UpperCAmelCase : List[Any] = []
__UpperCAmelCase : Optional[Any] = []
for i in range(__lowercase ):
__UpperCAmelCase : Union[str, Any] = [char_scores[i], bpe_scores[i], wp_scores[i]]
__UpperCAmelCase : Any = [char_strs[i], bpe_strs[i], wp_strs[i]]
__UpperCAmelCase : Union[str, Any] = scores.index(max(__lowercase ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
__UpperCAmelCase : Union[str, Any] = {}
__UpperCAmelCase : List[str] = final_strs
__UpperCAmelCase : Optional[int] = final_scores
__UpperCAmelCase : List[str] = char_strs
__UpperCAmelCase : Any = bpe_strs
__UpperCAmelCase : Tuple = wp_strs
return out
def UpperCAmelCase ( self : Dict , __lowercase : str , __lowercase : Tuple ) -> Any:
if format == DecodeType.CHARACTER:
__UpperCAmelCase : str = self.char_decode
__UpperCAmelCase : Optional[int] = 1
__UpperCAmelCase : List[str] = """[s]"""
elif format == DecodeType.BPE:
__UpperCAmelCase : List[Any] = self.bpe_decode
__UpperCAmelCase : List[str] = 2
__UpperCAmelCase : str = """#"""
elif format == DecodeType.WORDPIECE:
__UpperCAmelCase : Any = self.wp_decode
__UpperCAmelCase : Tuple = 102
__UpperCAmelCase : str = """[SEP]"""
else:
raise ValueError(f"""Format {format} is not supported.""" )
__UpperCAmelCase , __UpperCAmelCase : str = [], []
__UpperCAmelCase : Any = pred_logits.size(0 )
__UpperCAmelCase : Optional[Any] = pred_logits.size(1 )
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = pred_logits.topk(1 , dim=-1 , largest=__lowercase , sorted=__lowercase )
__UpperCAmelCase : Dict = preds_index.view(-1 , __lowercase )[:, 1:]
__UpperCAmelCase : Optional[Any] = decoder(__lowercase )
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = torch.nn.functional.softmax(__lowercase , dim=2 ).max(dim=2 )
__UpperCAmelCase : Any = preds_max_prob[:, 1:]
for index in range(__lowercase ):
__UpperCAmelCase : Tuple = preds_str[index].find(__lowercase )
__UpperCAmelCase : int = preds_str[index][:pred_eos]
__UpperCAmelCase : Tuple = preds_index[index].cpu().tolist()
__UpperCAmelCase : Dict = pred_index.index(__lowercase ) if eos_token in pred_index else -1
__UpperCAmelCase : Optional[int] = preds_max_prob[index][: pred_eos_index + 1]
__UpperCAmelCase : Union[str, Any] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(__lowercase )
conf_scores.append(__lowercase )
return dec_strs, conf_scores
def UpperCAmelCase ( self : Optional[int] , __lowercase : str ) -> Tuple:
__UpperCAmelCase : int = [seq.replace(""" """ , """""" ) for seq in self.char_tokenizer.batch_decode(__lowercase )]
return decode_strs
def UpperCAmelCase ( self : Tuple , __lowercase : Dict ) -> Union[str, Any]:
return self.bpe_tokenizer.batch_decode(__lowercase )
def UpperCAmelCase ( self : Optional[int] , __lowercase : Union[str, Any] ) -> Optional[Any]:
__UpperCAmelCase : Optional[Any] = [seq.replace(""" """ , """""" ) for seq in self.wp_tokenizer.batch_decode(__lowercase )]
return decode_strs
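# A minimal sketch of the per-sample fusion rule implemented above: each decoding
# head (char / bpe / wp) proposes a string with a cumulative-product confidence,
# and the highest-scoring proposal wins. The values below are illustrative.
scores = [0.91, 0.83, 0.88]
strs = ["ticket", "ticket", "tlcket"]
assert strs[scores.index(max(scores))] == "ticket"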
| 63 |
from math import pi, sqrt
def lowerCamelCase__ ( __lowerCamelCase : float ):
if num <= 0:
raise ValueError("""math domain error""" )
if num > 1_7_1.5:
raise OverflowError("""math range error""" )
elif num - int(__lowerCamelCase ) not in (0, 0.5):
raise NotImplementedError("""num must be an integer or a half-integer""" )
elif num == 0.5:
return sqrt(__lowerCamelCase )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def lowerCamelCase__ ( ):
assert gamma(0.5 ) == sqrt(__lowerCamelCase )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
a : Optional[int] = 1.0
while num:
a : List[str] = float(input("Gamma of: "))
print(f"""gamma({num}) = {gamma(num)}""")
print("\nEnter 0 to exit...")
| 63 | 1 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
a : Any = logging.get_logger(__name__)
class a ( lowercase__ ):
"""simple docstring"""
def __init__( self : List[Any] , *__lowercase : Tuple , **__lowercase : Dict ) -> None:
warnings.warn(
"""The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use PoolFormerImageProcessor instead.""" , __lowercase , )
super().__init__(*__lowercase , **__lowercase )
| 63 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node:
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output

    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    if root is None:
        return []

    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root)

    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output


def main() -> None:  # Main function for testing.
    tree = make_tree()
    print(f"In-order Traversal: {inorder(tree)}")
    print(f"Pre-order Traversal: {preorder(tree)}")
    print(f"Post-order Traversal: {postorder(tree)}", "\n")
    print(f"Height of Tree: {height(tree)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(tree), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(tree) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(tree, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(tree))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
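# Hedged spot-check added in this cleanup, using the 5-node demo tree
# built by make_tree() above:
if __name__ == "__main__":
    assert inorder(make_tree()) == [4, 2, 5, 1, 3]
    assert zigzag(make_tree()) == [[1], [3, 2], [4, 5]]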
| 63 | 1 |
def binary_multiply(a: int, b: int) -> int:
    # Russian-peasant multiplication: add a shifted copy of `a` for each set bit of `b`.
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, c: int) -> int:
    # Same scheme, reducing modulo `c` after every addition so intermediates stay small.
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
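# Minimal usage sketch (the function names above were chosen in this cleanup,
# not taken from the original source):
if __name__ == "__main__":
    assert binary_multiply(5, 7) == 35
    assert binary_mod_multiply(5, 7, 6) == 35 % 6  # == 5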
| 63 |
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
a : Optional[int] = GPTSanJapaneseTokenizer
a : Optional[Any] = False
a : List[str] = {'do_clean_text': False, 'add_prefix_space': False}
def UpperCAmelCase ( self : Tuple ) -> Any:
super().setUp()
# fmt: off
__UpperCAmelCase : Tuple = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
# fmt: on
__UpperCAmelCase : Dict = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
__UpperCAmelCase : Dict = {"""unk_token""": """<unk>"""}
__UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.emoji_file , """w""" ) as emoji_writer:
emoji_writer.write(json.dumps(__lowercase ) )
def UpperCAmelCase ( self : Tuple , **__lowercase : int ) -> Any:
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__lowercase )
def UpperCAmelCase ( self : str , __lowercase : Union[str, Any] ) -> Any:
__UpperCAmelCase : Any = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
__UpperCAmelCase : int = """こんにちは、世界。 \nこんばんは、世界。😀"""
return input_text, output_text
def UpperCAmelCase ( self : List[Any] , __lowercase : Optional[int] ) -> List[Any]:
__UpperCAmelCase , __UpperCAmelCase : int = self.get_input_output_texts(__lowercase )
__UpperCAmelCase : Tuple = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
__UpperCAmelCase : Dict = tokenizer.decode(__lowercase , clean_up_tokenization_spaces=__lowercase )
return text, ids
def UpperCAmelCase ( self : int ) -> Optional[Any]:
pass # TODO add if relevant
def UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
pass # TODO add if relevant
def UpperCAmelCase ( self : Dict ) -> Tuple:
pass # TODO add if relevant
def UpperCAmelCase ( self : str ) -> Tuple:
__UpperCAmelCase : List[str] = self.get_tokenizer()
# Testing tokenization
__UpperCAmelCase : int = """こんにちは、世界。 こんばんは、㔺界。"""
__UpperCAmelCase : Dict = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
__UpperCAmelCase : Optional[Any] = tokenizer.tokenize(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
# Testing conversion to ids without special tokens
__UpperCAmelCase : List[str] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
__UpperCAmelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
# Testing conversion to ids with special tokens
__UpperCAmelCase : List[Any] = tokens + [tokenizer.unk_token]
__UpperCAmelCase : str = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
__UpperCAmelCase : Any = tokenizer.convert_tokens_to_ids(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
def UpperCAmelCase ( self : Tuple ) -> Dict:
__UpperCAmelCase : int = self.get_tokenizer()
# Testing tokenization
__UpperCAmelCase : Tuple = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
__UpperCAmelCase : int = """こんにちは、、、、世界。こんばんは、、、、世界。"""
__UpperCAmelCase : Tuple = tokenizer.encode(__lowercase )
__UpperCAmelCase : int = tokenizer.decode(__lowercase )
self.assertEqual(__lowercase , __lowercase )
@slow
def UpperCAmelCase ( self : int ) -> Optional[int]:
__UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
__UpperCAmelCase : List[Any] = """こんにちは、世界。"""
__UpperCAmelCase : Optional[int] = """こんばんは、㔺界。😀"""
__UpperCAmelCase : List[Any] = """こんにちは、世界。こんばんは、世界。😀"""
__UpperCAmelCase : List[str] = tokenizer.encode(prefix_text + input_text )
__UpperCAmelCase : List[Any] = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
__UpperCAmelCase : Any = tokenizer.encode(__lowercase , prefix_text=__lowercase )
__UpperCAmelCase : Optional[int] = tokenizer.decode(__lowercase )
__UpperCAmelCase : Any = tokenizer.decode(__lowercase )
__UpperCAmelCase : Optional[Any] = tokenizer.decode(__lowercase )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
@slow
def UpperCAmelCase ( self : Any ) -> str:
__UpperCAmelCase : int = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
__UpperCAmelCase : int = """こんにちは、世界。"""
__UpperCAmelCase : List[Any] = """こんばんは、㔺界。😀"""
__UpperCAmelCase : Union[str, Any] = len(tokenizer.encode(__lowercase ) ) - 2
__UpperCAmelCase : int = len(tokenizer.encode(__lowercase ) ) - 2
__UpperCAmelCase : List[Any] = [1] + [0] * (len_prefix + len_text + 1)
__UpperCAmelCase : Union[str, Any] = [1] * (len_prefix + len_text + 1) + [0]
__UpperCAmelCase : List[Any] = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
__UpperCAmelCase : Union[str, Any] = tokenizer(prefix_text + input_text ).token_type_ids
__UpperCAmelCase : Optional[Any] = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
__UpperCAmelCase : Tuple = tokenizer(__lowercase , prefix_text=__lowercase ).token_type_ids
self.assertListEqual(__lowercase , __lowercase )
self.assertListEqual(__lowercase , __lowercase )
self.assertListEqual(__lowercase , __lowercase )
@slow
def UpperCAmelCase ( self : List[str] ) -> int:
__UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
__UpperCAmelCase : Optional[int] = tokenizer.encode("""あンいワ""" )
__UpperCAmelCase : Tuple = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
__UpperCAmelCase : Optional[int] = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
self.assertEqual(tokenizer.decode(__lowercase ) , tokenizer.decode(__lowercase ) )
self.assertEqual(tokenizer.decode(__lowercase ) , tokenizer.decode(__lowercase ) )
self.assertNotEqual(__lowercase , __lowercase )
self.assertNotEqual(__lowercase , __lowercase )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def UpperCAmelCase ( self : List[Any] ) -> List[str]:
__UpperCAmelCase : Any = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
__UpperCAmelCase : List[Any] = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
__UpperCAmelCase : int = tokenizer(__lowercase , padding=__lowercase )
__UpperCAmelCase : Optional[Any] = tokenizer.batch_encode_plus(__lowercase , padding=__lowercase )
# fmt: off
__UpperCAmelCase : Optional[int] = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
__UpperCAmelCase : Tuple = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
__UpperCAmelCase : Union[str, Any] = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , __lowercase )
self.assertListEqual(x_token.token_type_ids , __lowercase )
self.assertListEqual(x_token.attention_mask , __lowercase )
self.assertListEqual(x_token_a.input_ids , __lowercase )
self.assertListEqual(x_token_a.token_type_ids , __lowercase )
self.assertListEqual(x_token_a.attention_mask , __lowercase )
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def UpperCAmelCase ( self : Any ) -> int:
# tokenizer has no padding token
pass
| 63 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : Optional[Any] = logging.get_logger(__name__)
a : Optional[int] = {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class a ( lowercase__ ):
"""simple docstring"""
a : List[str] = 'realm'
def __init__( self : List[str] , __lowercase : Any=30522 , __lowercase : Any=768 , __lowercase : str=128 , __lowercase : Optional[Any]=12 , __lowercase : Any=12 , __lowercase : int=8 , __lowercase : Any=3072 , __lowercase : Any="gelu_new" , __lowercase : List[Any]=0.1 , __lowercase : Any=0.1 , __lowercase : Optional[Any]=512 , __lowercase : List[str]=2 , __lowercase : List[Any]=0.02 , __lowercase : int=1e-1_2 , __lowercase : int=256 , __lowercase : Tuple=10 , __lowercase : str=1e-3 , __lowercase : Any=5 , __lowercase : List[str]=320 , __lowercase : Optional[int]=13353718 , __lowercase : int=5000 , __lowercase : Optional[int]=1 , __lowercase : Optional[Any]=0 , __lowercase : int=2 , **__lowercase : Tuple , ) -> List[Any]:
super().__init__(pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase )
# Common config
__UpperCAmelCase : Tuple = vocab_size
__UpperCAmelCase : List[Any] = max_position_embeddings
__UpperCAmelCase : Dict = hidden_size
__UpperCAmelCase : Optional[Any] = retriever_proj_size
__UpperCAmelCase : Optional[int] = num_hidden_layers
__UpperCAmelCase : int = num_attention_heads
__UpperCAmelCase : List[Any] = num_candidates
__UpperCAmelCase : Optional[Any] = intermediate_size
__UpperCAmelCase : Union[str, Any] = hidden_act
__UpperCAmelCase : Tuple = hidden_dropout_prob
__UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
__UpperCAmelCase : int = initializer_range
__UpperCAmelCase : Optional[Any] = type_vocab_size
__UpperCAmelCase : Optional[Any] = layer_norm_eps
# Reader config
__UpperCAmelCase : int = span_hidden_size
__UpperCAmelCase : str = max_span_width
__UpperCAmelCase : Tuple = reader_layer_norm_eps
__UpperCAmelCase : List[str] = reader_beam_size
__UpperCAmelCase : str = reader_seq_len
# Retrieval config
__UpperCAmelCase : Optional[int] = num_block_records
__UpperCAmelCase : Tuple = searcher_beam_size
| 63 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
a : Dict = logging.get_logger(__name__)
@dataclass
class a ( lowercase__ ):
"""simple docstring"""
a : Dict = [
'no_inference',
'no_cuda',
'no_tpu',
'no_speed',
'no_memory',
'no_env_print',
'no_multi_process',
]
def __init__( self : List[Any] , **__lowercase : Dict ) -> Tuple:
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
__UpperCAmelCase : List[Any] = deprecated_arg[3:]
setattr(self , __lowercase , not kwargs.pop(__lowercase ) )
logger.warning(
f"""{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"""
f""" {positive_arg}={kwargs[positive_arg]}""" )
__UpperCAmelCase : str = kwargs.pop("""torchscript""" , self.torchscript )
__UpperCAmelCase : Union[str, Any] = kwargs.pop("""torch_xla_tpu_print_metrics""" , self.torch_xla_tpu_print_metrics )
__UpperCAmelCase : Optional[Any] = kwargs.pop("""fp16_opt_level""" , self.fpaa_opt_level )
super().__init__(**__lowercase )
a : bool = field(default=lowercase__ , metadata={'help': 'Trace the models using torchscript'} )
a : bool = field(default=lowercase__ , metadata={'help': 'Print Xla/PyTorch tpu metrics'} )
a : str = field(
default='O1' , metadata={
'help': (
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '
'See details at https://nvidia.github.io/apex/amp.html'
)
} , )
@cached_property
def UpperCAmelCase ( self : Any ) -> Tuple["torch.device", int]:
requires_backends(self , ["""torch"""] )
logger.info("""PyTorch: setting up devices""" )
if not self.cuda:
__UpperCAmelCase : str = torch.device("""cpu""" )
__UpperCAmelCase : int = 0
elif is_torch_tpu_available():
__UpperCAmelCase : Tuple = xm.xla_device()
__UpperCAmelCase : int = 0
else:
__UpperCAmelCase : Dict = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
__UpperCAmelCase : Optional[int] = torch.cuda.device_count()
return device, n_gpu
@property
def UpperCAmelCase ( self : Optional[Any] ) -> str:
return is_torch_tpu_available() and self.tpu
@property
def UpperCAmelCase ( self : List[str] ) -> int:
requires_backends(self , ["""torch"""] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def UpperCAmelCase ( self : int ) -> "torch.device":
requires_backends(self , ["""torch"""] )
return self._setup_devices[0]
@property
def UpperCAmelCase ( self : int ) -> List[Any]:
requires_backends(self , ["""torch"""] )
return self._setup_devices[1]
@property
def UpperCAmelCase ( self : Tuple ) -> List[str]:
return self.n_gpu > 0
| 63 | 1 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class a :
"""simple docstring"""
def __init__( self : Tuple , __lowercase : Any , __lowercase : Union[str, Any]=3 , __lowercase : str=32 , __lowercase : List[str]=3 , __lowercase : Union[str, Any]=10 , __lowercase : int=[10, 20, 30, 40] , __lowercase : Any=[1, 1, 2, 1] , __lowercase : Union[str, Any]=True , __lowercase : Optional[Any]=True , __lowercase : str="relu" , __lowercase : int=3 , __lowercase : Any=None , ) -> Optional[int]:
__UpperCAmelCase : Tuple = parent
__UpperCAmelCase : Optional[Any] = batch_size
__UpperCAmelCase : Union[str, Any] = image_size
__UpperCAmelCase : List[str] = num_channels
__UpperCAmelCase : str = embeddings_size
__UpperCAmelCase : Union[str, Any] = hidden_sizes
__UpperCAmelCase : str = depths
__UpperCAmelCase : Any = is_training
__UpperCAmelCase : Optional[Any] = use_labels
__UpperCAmelCase : str = hidden_act
__UpperCAmelCase : Optional[int] = num_labels
__UpperCAmelCase : Dict = scope
__UpperCAmelCase : Any = len(__lowercase )
def UpperCAmelCase ( self : List[str] ) -> List[Any]:
__UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase : Any = None
if self.use_labels:
__UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.num_labels )
__UpperCAmelCase : List[Any] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self : str ) -> Optional[int]:
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def UpperCAmelCase ( self : int , __lowercase : Dict , __lowercase : Union[str, Any] , __lowercase : Optional[Any] ) -> Optional[Any]:
__UpperCAmelCase : Optional[int] = TFResNetModel(config=__lowercase )
__UpperCAmelCase : Any = model(__lowercase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCAmelCase ( self : Any , __lowercase : Optional[int] , __lowercase : Optional[int] , __lowercase : Any ) -> List[Any]:
__UpperCAmelCase : Optional[Any] = self.num_labels
__UpperCAmelCase : List[Any] = TFResNetForImageClassification(__lowercase )
__UpperCAmelCase : Dict = model(__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase ( self : List[Any] ) -> str:
__UpperCAmelCase : Tuple = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = config_and_inputs
__UpperCAmelCase : int = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class a ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
a : Tuple = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
a : Union[str, Any] = (
{'feature-extraction': TFResNetModel, 'image-classification': TFResNetForImageClassification}
if is_tf_available()
else {}
)
a : List[str] = False
a : List[Any] = False
a : Tuple = False
a : List[Any] = False
a : Union[str, Any] = False
def UpperCAmelCase ( self : List[Any] ) -> Optional[int]:
__UpperCAmelCase : Union[str, Any] = TFResNetModelTester(self )
__UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase )
def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase ( self : List[Any] ) -> Tuple:
return
@unittest.skip(reason="""ResNet does not use inputs_embeds""" )
def UpperCAmelCase ( self : Tuple ) -> List[str]:
pass
@unittest.skip(reason="""ResNet does not support input and output embeddings""" )
def UpperCAmelCase ( self : List[str] ) -> Optional[int]:
pass
def UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
__UpperCAmelCase , __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : str = model_class(__lowercase )
__UpperCAmelCase : Optional[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : Optional[Any] = [*signature.parameters.keys()]
__UpperCAmelCase : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __lowercase )
def UpperCAmelCase ( self : Tuple ) -> str:
__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def UpperCAmelCase ( self : Any ) -> List[Any]:
def check_hidden_states_output(__lowercase : List[Any] , __lowercase : List[Any] , __lowercase : Tuple ):
__UpperCAmelCase : Optional[int] = model_class(__lowercase )
__UpperCAmelCase : str = model(**self._prepare_for_class(__lowercase , __lowercase ) )
__UpperCAmelCase : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__UpperCAmelCase : Dict = self.model_tester.num_stages
self.assertEqual(len(__lowercase ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : List[str] = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
__UpperCAmelCase : Any = layer_type
__UpperCAmelCase : Optional[Any] = True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase : Tuple = True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
def UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowercase )
@slow
def UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Optional[int] = TFResNetModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
def lowerCamelCase__ ( ):
__UpperCAmelCase : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase ( self : Any ) -> int:
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
__UpperCAmelCase : Any = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
__UpperCAmelCase : int = self.default_image_processor
__UpperCAmelCase : Optional[Any] = prepare_img()
__UpperCAmelCase : List[str] = image_processor(images=__lowercase , return_tensors="""tf""" )
# forward pass
__UpperCAmelCase : Tuple = model(**__lowercase )
# verify the logits
__UpperCAmelCase : Optional[int] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , __lowercase )
__UpperCAmelCase : Union[str, Any] = tf.constant([-11.1_069, -9.7_877, -8.3_777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __lowercase , atol=1e-4 ) )
| 63 |
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def lowerCamelCase__ ( __lowerCamelCase : Tuple , __lowerCamelCase : Dict ):
assert isinstance(__lowerCamelCase , __lowerCamelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict ):
__UpperCAmelCase : Union[str, Any] = tmp_path / """cache"""
__UpperCAmelCase : Optional[int] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCAmelCase : str = SqlDatasetReader(
"""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=__lowerCamelCase , keep_in_memory=__lowerCamelCase ).read()
_check_sql_dataset(__lowerCamelCase , __lowerCamelCase )
@require_sqlalchemy
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def lowerCamelCase__ ( __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : Any ):
__UpperCAmelCase : Union[str, Any] = tmp_path / """cache"""
__UpperCAmelCase : List[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
__UpperCAmelCase : Any = features.copy() if features else default_expected_features
__UpperCAmelCase : Union[str, Any] = (
Features({feature: Value(__lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase : List[str] = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , features=__lowerCamelCase , cache_dir=__lowerCamelCase ).read()
_check_sql_dataset(__lowerCamelCase , __lowerCamelCase )
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] ):
with contextlib.closing(sqlite3.connect(__lowerCamelCase ) ) as con:
__UpperCAmelCase : Dict = con.cursor()
cur.execute("""SELECT * FROM dataset""" )
for row in cur:
yield row
@require_sqlalchemy
def lowerCamelCase__ ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : int ):
__UpperCAmelCase : Optional[int] = tmp_path / """cache"""
__UpperCAmelCase : str = os.path.join(__lowerCamelCase , """tmp.sql""" )
__UpperCAmelCase : List[str] = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=__lowerCamelCase ).read()
SqlDatasetWriter(__lowerCamelCase , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=1 ).write()
__UpperCAmelCase : Optional[int] = iter_sql_file(__lowerCamelCase )
__UpperCAmelCase : Dict = iter_sql_file(__lowerCamelCase )
for rowa, rowa in zip(__lowerCamelCase , __lowerCamelCase ):
assert rowa == rowa
@require_sqlalchemy
def lowerCamelCase__ ( __lowerCamelCase : Any , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] ):
__UpperCAmelCase : int = tmp_path / """cache"""
__UpperCAmelCase : int = os.path.join(__lowerCamelCase , """tmp.sql""" )
__UpperCAmelCase : Any = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=__lowerCamelCase ).read()
SqlDatasetWriter(__lowerCamelCase , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=2 ).write()
__UpperCAmelCase : Union[str, Any] = iter_sql_file(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = iter_sql_file(__lowerCamelCase )
for rowa, rowa in zip(__lowerCamelCase , __lowerCamelCase ):
assert rowa == rowa
@require_sqlalchemy
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] ):
__UpperCAmelCase : Union[str, Any] = tmp_path / """cache"""
__UpperCAmelCase : Optional[int] = os.path.join(__lowerCamelCase , """tmp.sql""" )
__UpperCAmelCase : Optional[int] = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=__lowerCamelCase ).read()
with pytest.raises(__lowerCamelCase ):
SqlDatasetWriter(__lowerCamelCase , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=0 ).write()
| 63 | 1 |
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def lowerCamelCase__ ( __lowerCamelCase : List[str] , __lowerCamelCase : Any ):
__UpperCAmelCase : Optional[Any] = checkpoint
__UpperCAmelCase : Optional[Any] = {}
__UpperCAmelCase : Optional[int] = vae_state_dict["""encoder.conv_in.weight"""]
__UpperCAmelCase : Optional[int] = vae_state_dict["""encoder.conv_in.bias"""]
__UpperCAmelCase : List[str] = vae_state_dict["""encoder.conv_out.weight"""]
__UpperCAmelCase : Any = vae_state_dict["""encoder.conv_out.bias"""]
__UpperCAmelCase : Union[str, Any] = vae_state_dict["""encoder.norm_out.weight"""]
__UpperCAmelCase : int = vae_state_dict["""encoder.norm_out.bias"""]
__UpperCAmelCase : str = vae_state_dict["""decoder.conv_in.weight"""]
__UpperCAmelCase : Optional[Any] = vae_state_dict["""decoder.conv_in.bias"""]
__UpperCAmelCase : Dict = vae_state_dict["""decoder.conv_out.weight"""]
__UpperCAmelCase : Dict = vae_state_dict["""decoder.conv_out.bias"""]
__UpperCAmelCase : Any = vae_state_dict["""decoder.norm_out.weight"""]
__UpperCAmelCase : int = vae_state_dict["""decoder.norm_out.bias"""]
__UpperCAmelCase : str = vae_state_dict["""quant_conv.weight"""]
__UpperCAmelCase : Any = vae_state_dict["""quant_conv.bias"""]
__UpperCAmelCase : Optional[Any] = vae_state_dict["""post_quant_conv.weight"""]
__UpperCAmelCase : int = vae_state_dict["""post_quant_conv.bias"""]
# Retrieves the keys for the encoder down blocks only
__UpperCAmelCase : Tuple = len({""".""".join(layer.split(""".""" )[:3] ) for layer in vae_state_dict if """encoder.down""" in layer} )
__UpperCAmelCase : Dict = {
layer_id: [key for key in vae_state_dict if f"""down.{layer_id}""" in key] for layer_id in range(__lowerCamelCase )
}
# Retrieves the keys for the decoder up blocks only
__UpperCAmelCase : Tuple = len({""".""".join(layer.split(""".""" )[:3] ) for layer in vae_state_dict if """decoder.up""" in layer} )
__UpperCAmelCase : str = {
layer_id: [key for key in vae_state_dict if f"""up.{layer_id}""" in key] for layer_id in range(__lowerCamelCase )
}
for i in range(__lowerCamelCase ):
__UpperCAmelCase : str = [key for key in down_blocks[i] if f"""down.{i}""" in key and f"""down.{i}.downsample""" not in key]
if f"""encoder.down.{i}.downsample.conv.weight""" in vae_state_dict:
__UpperCAmelCase : Union[str, Any] = vae_state_dict.pop(
f"""encoder.down.{i}.downsample.conv.weight""" )
__UpperCAmelCase : List[Any] = vae_state_dict.pop(
f"""encoder.down.{i}.downsample.conv.bias""" )
__UpperCAmelCase : List[str] = renew_vae_resnet_paths(__lowerCamelCase )
__UpperCAmelCase : str = {"""old""": f"""down.{i}.block""", """new""": f"""down_blocks.{i}.resnets"""}
assign_to_checkpoint(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , additional_replacements=[meta_path] , config=__lowerCamelCase )
__UpperCAmelCase : Any = [key for key in vae_state_dict if """encoder.mid.block""" in key]
__UpperCAmelCase : Union[str, Any] = 2
for i in range(1 , num_mid_res_blocks + 1 ):
__UpperCAmelCase : int = [key for key in mid_resnets if f"""encoder.mid.block_{i}""" in key]
__UpperCAmelCase : int = renew_vae_resnet_paths(__lowerCamelCase )
__UpperCAmelCase : Any = {"""old""": f"""mid.block_{i}""", """new""": f"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , additional_replacements=[meta_path] , config=__lowerCamelCase )
__UpperCAmelCase : Dict = [key for key in vae_state_dict if """encoder.mid.attn""" in key]
__UpperCAmelCase : List[Any] = renew_vae_attention_paths(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = {"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""}
assign_to_checkpoint(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , additional_replacements=[meta_path] , config=__lowerCamelCase )
conv_attn_to_linear(__lowerCamelCase )
for i in range(__lowerCamelCase ):
__UpperCAmelCase : Optional[int] = num_up_blocks - 1 - i
__UpperCAmelCase : List[str] = [
key for key in up_blocks[block_id] if f"""up.{block_id}""" in key and f"""up.{block_id}.upsample""" not in key
]
if f"""decoder.up.{block_id}.upsample.conv.weight""" in vae_state_dict:
__UpperCAmelCase : Optional[int] = vae_state_dict[
f"""decoder.up.{block_id}.upsample.conv.weight"""
]
__UpperCAmelCase : Optional[Any] = vae_state_dict[
f"""decoder.up.{block_id}.upsample.conv.bias"""
]
__UpperCAmelCase : int = renew_vae_resnet_paths(__lowerCamelCase )
__UpperCAmelCase : Tuple = {"""old""": f"""up.{block_id}.block""", """new""": f"""up_blocks.{i}.resnets"""}
assign_to_checkpoint(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , additional_replacements=[meta_path] , config=__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = [key for key in vae_state_dict if """decoder.mid.block""" in key]
__UpperCAmelCase : int = 2
for i in range(1 , num_mid_res_blocks + 1 ):
__UpperCAmelCase : Optional[int] = [key for key in mid_resnets if f"""decoder.mid.block_{i}""" in key]
__UpperCAmelCase : Tuple = renew_vae_resnet_paths(__lowerCamelCase )
__UpperCAmelCase : Dict = {"""old""": f"""mid.block_{i}""", """new""": f"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , additional_replacements=[meta_path] , config=__lowerCamelCase )
__UpperCAmelCase : Any = [key for key in vae_state_dict if """decoder.mid.attn""" in key]
__UpperCAmelCase : List[Any] = renew_vae_attention_paths(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = {"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""}
assign_to_checkpoint(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , additional_replacements=[meta_path] , config=__lowerCamelCase )
conv_attn_to_linear(__lowerCamelCase )
return new_checkpoint
def lowerCamelCase__ ( __lowerCamelCase : str , __lowerCamelCase : str , ):
# Only support V1
__UpperCAmelCase : Tuple = requests.get(
""" https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml""" )
__UpperCAmelCase : str = io.BytesIO(r.content )
__UpperCAmelCase : List[str] = OmegaConf.load(__lowerCamelCase )
__UpperCAmelCase : Dict = 512
__UpperCAmelCase : List[str] = """cuda""" if torch.cuda.is_available() else """cpu"""
if checkpoint_path.endswith("""safetensors""" ):
from safetensors import safe_open
__UpperCAmelCase : int = {}
with safe_open(__lowerCamelCase , framework="""pt""" , device="""cpu""" ) as f:
for key in f.keys():
__UpperCAmelCase : Optional[int] = f.get_tensor(__lowerCamelCase )
else:
__UpperCAmelCase : Optional[int] = torch.load(__lowerCamelCase , map_location=__lowerCamelCase )["""state_dict"""]
# Convert the VAE model.
__UpperCAmelCase : Union[str, Any] = create_vae_diffusers_config(__lowerCamelCase , image_size=__lowerCamelCase )
__UpperCAmelCase : Any = custom_convert_ldm_vae_checkpoint(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : List[str] = AutoencoderKL(**__lowerCamelCase )
vae.load_state_dict(__lowerCamelCase )
vae.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
a : int = argparse.ArgumentParser()
parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
a : Tuple = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 63 |
from __future__ import annotations
seive = [True] * 1_000_001
i = 2
while i * i <= 1_000_000:
    if seive[i]:
        for j in range(i * i, 1_000_001, i):
            seive[j] = False
    i += 1


def is_prime(n: int) -> bool:
    return seive[n]


def contains_an_even_digit(n: int) -> bool:
    return any(digit in "02468" for digit in str(n))


def find_circular_primes(limit: int = 1_000_000) -> list[int]:
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result


def solution() -> int:
    return len(find_circular_primes())


if __name__ == "__main__":
    print(f"{len(find_circular_primes()) = }")
| 63 | 1 |
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    return round(float((pressure * volume) / (0.0821 * moles)))
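# Hedged spot-check added in this cleanup, following the ideal-gas relation
# PV = nRT with R = 0.0821 L*atm/(mol*K):
assert molarity_to_normality(2, 3.1, 0.31) == 20
assert moles_to_pressure(volume=0.82, moles=3, temperature=300) == 90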
if __name__ == "__main__":
import doctest
doctest.testmod()
| 63 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
a : Dict = logging.get_logger(__name__)
a : Tuple = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def lowerCamelCase__ ( __lowerCamelCase : Dict ):
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
__UpperCAmelCase : Union[str, Any] = k.replace(__lowerCamelCase , __lowerCamelCase )
if k.startswith("""encoder""" ):
__UpperCAmelCase : List[str] = k.replace(""".attn""" , """.self_attn""" )
__UpperCAmelCase : Optional[Any] = k.replace("""norm1""" , """self_attn_layer_norm""" )
__UpperCAmelCase : Union[str, Any] = k.replace("""norm2""" , """final_layer_norm""" )
elif k.startswith("""decoder""" ):
__UpperCAmelCase : Optional[int] = k.replace("""norm1""" , """self_attn_layer_norm""" )
__UpperCAmelCase : List[Any] = k.replace("""norm2""" , """encoder_attn_layer_norm""" )
__UpperCAmelCase : Any = k.replace("""norm3""" , """final_layer_norm""" )
return k
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] ):
__UpperCAmelCase : Optional[Any] = [
"""model.encoder.layernorm_embedding.weight""",
"""model.encoder.layernorm_embedding.bias""",
"""model.decoder.layernorm_embedding.weight""",
"""model.decoder.layernorm_embedding.bias""",
]
for k in keys:
__UpperCAmelCase : Dict = sd.pop(__lowerCamelCase )
__UpperCAmelCase : List[str] = k.replace("""layernorm_embedding""" , """layer_norm""" )
assert new_k not in sd
__UpperCAmelCase : List[str] = v
a : Optional[int] = ["START"]
@torch.no_grad()
def lowerCamelCase__ ( __lowerCamelCase : Dict , __lowerCamelCase : Optional[int] , __lowerCamelCase : str ):
__UpperCAmelCase : str = torch.load(__lowerCamelCase , map_location="""cpu""" )
__UpperCAmelCase : Tuple = model["""model"""]
__UpperCAmelCase : int = BlenderbotConfig.from_json_file(__lowerCamelCase )
__UpperCAmelCase : List[str] = BlenderbotForConditionalGeneration(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = m.model.state_dict().keys()
__UpperCAmelCase : Any = []
__UpperCAmelCase : Any = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
__UpperCAmelCase : int = rename_state_dict_key(__lowerCamelCase )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
__UpperCAmelCase : str = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(__lowerCamelCase )
m.model.load_state_dict(__lowerCamelCase , strict=__lowerCamelCase )
m.half()
m.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
a : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
a : Any = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 63 | 1 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def lowerCamelCase__ ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : str=() , __lowerCamelCase : List[str]=None , __lowerCamelCase : List[str]="no" , __lowerCamelCase : Dict="29500" ):
__UpperCAmelCase : int = False
__UpperCAmelCase : List[Any] = False
if any(key.startswith("""KAGGLE""" ) for key in os.environ.keys() ):
__UpperCAmelCase : Any = True
elif "IPython" in sys.modules:
__UpperCAmelCase : Optional[Any] = """google.colab""" in str(sys.modules["""IPython"""].get_ipython() )
try:
__UpperCAmelCase : Dict = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
f"""Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.""" )
if (in_colab or in_kaggle) and (os.environ.get("""TPU_NAME""" , __lowerCamelCase ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"""To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside """
"""your training function. Restart your notebook and make sure no cells initializes an """
"""`Accelerator`.""" )
if num_processes is None:
__UpperCAmelCase : int = 8
__UpperCAmelCase : Optional[Any] = PrepareForLaunch(__lowerCamelCase , distributed_type="""TPU""" )
print(f"""Launching a training on {num_processes} TPU cores.""" )
xmp.spawn(__lowerCamelCase , args=__lowerCamelCase , nprocs=__lowerCamelCase , start_method="""fork""" )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print("""Launching training on one GPU.""" )
else:
print("""Launching training on one CPU.""" )
function(*__lowerCamelCase )
else:
if num_processes is None:
raise ValueError(
"""You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.""" )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"""To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized """
"""inside your training function. Restart your notebook and make sure no cells initializes an """
"""`Accelerator`.""" )
if torch.cuda.is_initialized():
raise ValueError(
"""To launch a multi-GPU training from your notebook, you need to avoid running any instruction """
"""using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA """
"""function.""" )
# torch.distributed will expect a few environment variables to be set here. We set the ones common to each
# process here (the other ones will be set by the launcher).
with patch_environment(
world_size=__lowerCamelCase , master_addr="""127.0.0.1""" , master_port=__lowerCamelCase , mixed_precision=__lowerCamelCase ):
__UpperCAmelCase : Dict = PrepareForLaunch(__lowerCamelCase , distributed_type="""MULTI_GPU""" )
print(f"""Launching training on {num_processes} GPUs.""" )
try:
start_processes(__lowerCamelCase , args=__lowerCamelCase , nprocs=__lowerCamelCase , start_method="""fork""" )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
"""CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. """
"""This likely stems from an outside import causing issues once the `notebook_launcher()` is called. """
"""Please review your imports and test them when running the `notebook_launcher()` to identify """
"""which one is problematic.""" ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
__UpperCAmelCase : Union[str, Any] = """1"""
print("""Launching training on MPS.""" )
elif torch.cuda.is_available():
print("""Launching training on one GPU.""" )
else:
print("""Launching training on CPU.""" )
function(*__lowerCamelCase )
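# In upstream accelerate this helper is exposed as `notebook_launcher`; a typical
# call (hypothetical names, matching the signature above) would look like:
#   notebook_launcher(training_function, args=(model, dataloader), num_processes=2)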
def lowerCamelCase__ ( __lowerCamelCase : str , __lowerCamelCase : List[Any]=() , __lowerCamelCase : Dict=2 ):
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variables to be set here. We set the ones common to each
# process here (the other ones will be set by the launcher).
with patch_environment(
world_size=__lowerCamelCase , master_addr="""127.0.0.1""" , master_port="""29500""" , accelerate_mixed_precision="""no""" , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu="""yes""" , ):
__UpperCAmelCase : Tuple = PrepareForLaunch(__lowerCamelCase , debug=__lowerCamelCase )
start_processes(__lowerCamelCase , args=__lowerCamelCase , nprocs=__lowerCamelCase , start_method="""fork""" )
| 63 |
def interpolation_search(sorted_collection, item):
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid division by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None


def interpolation_search_by_recursion(sorted_collection, item, left, right):
    # avoid division by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)


def __assert_sorted(collection):
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True


if __name__ == "__main__":
    import sys

    debug = 0
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")
    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print("Not found")
| 63 | 1 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
a : Optional[int] = logging.get_logger(__name__)
a : int = {"tokenizer_file": "tokenizer.json"}
a : List[Any] = {
"tokenizer_file": {
"bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json",
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json",
},
}
class a ( lowercase__ ):
"""simple docstring"""
a : Dict = VOCAB_FILES_NAMES
a : Tuple = PRETRAINED_VOCAB_FILES_MAP
a : Any = ['input_ids', 'attention_mask']
a : List[Any] = None
def __init__( self : List[str] , __lowercase : str=None , __lowercase : List[str]=None , __lowercase : int=None , __lowercase : Optional[Any]="<unk>" , __lowercase : Union[str, Any]="<s>" , __lowercase : int="</s>" , __lowercase : Any="<pad>" , __lowercase : List[str]=False , __lowercase : Tuple=False , **__lowercase : Any , ) -> Any:
super().__init__(
__lowercase , __lowercase , tokenizer_file=__lowercase , unk_token=__lowercase , bos_token=__lowercase , eos_token=__lowercase , pad_token=__lowercase , add_prefix_space=__lowercase , clean_up_tokenization_spaces=__lowercase , **__lowercase , )
__UpperCAmelCase : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , __lowercase ) != add_prefix_space:
__UpperCAmelCase : Optional[Any] = getattr(__lowercase , pre_tok_state.pop("""type""" ) )
__UpperCAmelCase : Optional[Any] = add_prefix_space
__UpperCAmelCase : int = pre_tok_class(**__lowercase )
__UpperCAmelCase : int = add_prefix_space
def UpperCAmelCase ( self : Optional[Any] , *__lowercase : Tuple , **__lowercase : Optional[Any] ) -> BatchEncoding:
__UpperCAmelCase : List[str] = kwargs.get("""is_split_into_words""" , __lowercase )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
""" pretokenized inputs.""" )
return super()._batch_encode_plus(*__lowercase , **__lowercase )
def UpperCAmelCase ( self : Tuple , *__lowercase : Optional[int] , **__lowercase : Optional[int] ) -> BatchEncoding:
__UpperCAmelCase : int = kwargs.get("""is_split_into_words""" , __lowercase )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
""" pretokenized inputs.""" )
return super()._encode_plus(*__lowercase , **__lowercase )
def UpperCAmelCase ( self : Optional[int] , __lowercase : str , __lowercase : Optional[str] = None ) -> Tuple[str]:
__UpperCAmelCase : Union[str, Any] = self._tokenizer.model.save(__lowercase , name=__lowercase )
return tuple(__lowercase )
def UpperCAmelCase ( self : Any , __lowercase : "Conversation" ) -> List[int]:
__UpperCAmelCase : Dict = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__lowercase , add_special_tokens=__lowercase ) + [self.eos_token_id] )
if len(__lowercase ) > self.model_max_length:
__UpperCAmelCase : Optional[Any] = input_ids[-self.model_max_length :]
return input_ids
| 63 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 63 | 1 |
import math
import sys
import cva
import numpy as np
def lowerCamelCase__ ( __lowerCamelCase : np.ndarray , __lowerCamelCase : float ):
# For applying gaussian function for each element in matrix.
__UpperCAmelCase : int = math.sqrt(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = 1 / (sigma * math.sqrt(2 * math.pi ))
return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def lowerCamelCase__ ( __lowerCamelCase : np.ndarray , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int ):
__UpperCAmelCase : List[Any] = kernel_size // 2
return img[x - half : x + half + 1, y - half : y + half + 1]
def lowerCamelCase__ ( __lowerCamelCase : int , __lowerCamelCase : float ):
# Creates a gaussian kernel of given dimension.
__UpperCAmelCase : Union[str, Any] = np.zeros((kernel_size, kernel_size) )
for i in range(0 , __lowerCamelCase ):
for j in range(0 , __lowerCamelCase ):
__UpperCAmelCase : str = math.sqrt(
abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
return vec_gaussian(__lowerCamelCase , __lowerCamelCase )
def lowerCamelCase__ ( __lowerCamelCase : np.ndarray , __lowerCamelCase : float , __lowerCamelCase : float , __lowerCamelCase : int , ):
__UpperCAmelCase : Optional[Any] = np.zeros(img.shape )
__UpperCAmelCase : int = get_gauss_kernel(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase , __UpperCAmelCase : Tuple = img.shape
for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
__UpperCAmelCase : int = get_slice(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : Dict = img_s - img_s[kernel_size // 2, kernel_size // 2]
__UpperCAmelCase : Optional[Any] = vec_gaussian(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : Any = np.multiply(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : str = np.multiply(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : List[str] = np.sum(__lowerCamelCase ) / np.sum(__lowerCamelCase )
__UpperCAmelCase : List[Any] = val
return imga
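# Bilateral-filter sketch: the spatial Gaussian kernel is fixed per call, while a
# second Gaussian is computed from each window's intensity difference to its
# centre pixel; multiplying the two gives pixels across strong edges near-zero
# weight, which is why edges survive the smoothing.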
def lowerCamelCase__ ( __lowerCamelCase : list ):
__UpperCAmelCase : List[str] = args[1] if args[1:] else """../image_data/lena.jpg"""
__UpperCAmelCase : Optional[Any] = float(args[2] ) if args[2:] else 1.0
__UpperCAmelCase : Dict = float(args[3] ) if args[3:] else 1.0
if args[4:]:
__UpperCAmelCase : Optional[int] = int(args[4] )
__UpperCAmelCase : List[str] = kernel_size + abs(kernel_size % 2 - 1 )
else:
__UpperCAmelCase : int = 5
return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
a ,a ,a ,a : Optional[Any] = parse_args(sys.argv)
a : Optional[int] = cva.imread(filename, 0)
cva.imshow("input image", img)
a : int = img / 255
a : Union[str, Any] = out.astype("float32")
a : Any = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
a : Optional[int] = out * 255
a : Union[str, Any] = np.uint8(out)
cva.imshow("output image", out)
cva.waitKey(0)
cva.destroyAllWindows()
| 63 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class a ( lowercase__ , lowercase__ ):
"""simple docstring"""
a : Dict = 1
@register_to_config
def __init__( self : int , __lowercase : int = 1000 , __lowercase : Optional[Union[np.ndarray, List[float]]] = None ) -> Union[str, Any]:
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(__lowercase )
# standard deviation of the initial noise distribution
__UpperCAmelCase : List[Any] = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
__UpperCAmelCase : List[Any] = 4
# running values
__UpperCAmelCase : str = []
def UpperCAmelCase ( self : Union[str, Any] , __lowercase : int , __lowercase : Union[str, torch.device] = None ) -> int:
__UpperCAmelCase : int = num_inference_steps
__UpperCAmelCase : Union[str, Any] = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
__UpperCAmelCase : Union[str, Any] = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
            __UpperCAmelCase : Dict = torch.tensor(self.config.trained_betas , dtype=torch.float32 )
else:
__UpperCAmelCase : Dict = torch.sin(steps * math.pi / 2 ) ** 2
__UpperCAmelCase : List[Any] = (1.0 - self.betas**2) ** 0.5
        __UpperCAmelCase : Tuple = (torch.atan2(self.betas , self.alphas ) / math.pi * 2)[:-1]
__UpperCAmelCase : Dict = timesteps.to(__lowercase )
__UpperCAmelCase : Optional[Any] = []
def UpperCAmelCase ( self : Optional[int] , __lowercase : torch.FloatTensor , __lowercase : int , __lowercase : torch.FloatTensor , __lowercase : bool = True , ) -> Union[SchedulerOutput, Tuple]:
if self.num_inference_steps is None:
raise ValueError(
"""Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" )
__UpperCAmelCase : List[str] = (self.timesteps == timestep).nonzero().item()
__UpperCAmelCase : Optional[Any] = timestep_index + 1
__UpperCAmelCase : List[str] = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(__lowercase )
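        # The branches below apply the classical Adams-Bashforth multistep
        # coefficients to the stored model-output history: one entry -> Euler,
        # two -> AB2 (3/2, -1/2), three -> AB3 (23/12, -16/12, 5/12), and four or
        # more -> AB4 (55/24, -59/24, 37/24, -9/24), matching Algorithm 2 of the
        # paper referenced in __init__.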
if len(self.ets ) == 1:
__UpperCAmelCase : Tuple = self.ets[-1]
elif len(self.ets ) == 2:
__UpperCAmelCase : Union[str, Any] = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
__UpperCAmelCase : Union[str, Any] = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
__UpperCAmelCase : List[Any] = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
__UpperCAmelCase : Union[str, Any] = self._get_prev_sample(__lowercase , __lowercase , __lowercase , __lowercase )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__lowercase )
def UpperCAmelCase ( self : Optional[Any] , __lowercase : torch.FloatTensor , *__lowercase : Optional[Any] , **__lowercase : Any ) -> torch.FloatTensor:
return sample
def UpperCAmelCase ( self : Tuple , __lowercase : Tuple , __lowercase : List[str] , __lowercase : Optional[int] , __lowercase : Dict ) -> str:
__UpperCAmelCase : int = self.alphas[timestep_index]
__UpperCAmelCase : Tuple = self.betas[timestep_index]
__UpperCAmelCase : Any = self.alphas[prev_timestep_index]
__UpperCAmelCase : List[str] = self.betas[prev_timestep_index]
__UpperCAmelCase : List[str] = (sample - sigma * ets) / max(__lowercase , 1e-8 )
__UpperCAmelCase : List[Any] = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self : Tuple ) -> str:
return self.config.num_train_timesteps
| 63 | 1 |
from manim import *
class a ( lowercase__ ):
"""simple docstring"""
def UpperCAmelCase ( self : List[Any] ) -> List[str]:
__UpperCAmelCase : List[Any] = Rectangle(height=0.5 , width=0.5 )
__UpperCAmelCase : List[Any] = Rectangle(height=0.25 , width=0.25 )
__UpperCAmelCase : Optional[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__UpperCAmelCase : Any = [mem.copy() for i in range(6 )]
__UpperCAmelCase : str = [mem.copy() for i in range(6 )]
__UpperCAmelCase : Optional[int] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
__UpperCAmelCase : List[str] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
__UpperCAmelCase : List[Any] = VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 )
__UpperCAmelCase : List[str] = Text("""CPU""" , font_size=24 )
__UpperCAmelCase : List[Any] = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__lowercase )
__UpperCAmelCase : Tuple = [mem.copy() for i in range(4 )]
__UpperCAmelCase : Any = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
__UpperCAmelCase : Union[str, Any] = Text("""GPU""" , font_size=24 )
__UpperCAmelCase : str = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
gpu.move_to([-1, -1, 0] )
self.add(__lowercase )
__UpperCAmelCase : List[Any] = [mem.copy() for i in range(6 )]
__UpperCAmelCase : List[Any] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
__UpperCAmelCase : Union[str, Any] = Text("""Model""" , font_size=24 )
__UpperCAmelCase : Optional[int] = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
model.move_to([3, -1.0, 0] )
self.add(__lowercase )
__UpperCAmelCase : Tuple = []
__UpperCAmelCase : str = []
__UpperCAmelCase : List[Any] = []
for i, rect in enumerate(__lowercase ):
rect.set_stroke(__lowercase )
__UpperCAmelCase : List[Any] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__lowercase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__lowercase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=__lowercase , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=__lowercase , buff=0.0 )
self.add(__lowercase )
model_cpu_arr.append(__lowercase )
self.add(*__lowercase , *__lowercase , *__lowercase )
__UpperCAmelCase : Optional[int] = [mem.copy() for i in range(6 )]
__UpperCAmelCase : List[Any] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
__UpperCAmelCase : Optional[Any] = Text("""Loaded Checkpoint""" , font_size=24 )
__UpperCAmelCase : List[Any] = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
checkpoint.move_to([3, 0.5, 0] )
self.add(__lowercase )
__UpperCAmelCase : int = []
__UpperCAmelCase : Optional[int] = []
for i, rect in enumerate(__lowercase ):
__UpperCAmelCase : Dict = fill.copy().set_fill(__lowercase , opacity=0.7 )
target.move_to(__lowercase )
ckpt_arr.append(__lowercase )
__UpperCAmelCase : Optional[Any] = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(__lowercase )
self.add(*__lowercase , *__lowercase )
__UpperCAmelCase : Union[str, Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__UpperCAmelCase : Optional[Any] = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__lowercase , __lowercase )
__UpperCAmelCase : List[Any] = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(__lowercase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(__lowercase )
__UpperCAmelCase : Union[str, Any] = MarkupText(
f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
__UpperCAmelCase : List[str] = [meta_mem.copy() for i in range(6 )]
__UpperCAmelCase : List[str] = [meta_mem.copy() for i in range(6 )]
__UpperCAmelCase : Optional[int] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
__UpperCAmelCase : Union[str, Any] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
__UpperCAmelCase : Tuple = VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 )
__UpperCAmelCase : Tuple = Text("""Disk""" , font_size=24 )
__UpperCAmelCase : str = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(__lowercase , run_time=3 ) , Write(__lowercase , run_time=1 ) , Create(__lowercase , run_time=1 ) )
__UpperCAmelCase : List[Any] = []
for i, rect in enumerate(__lowercase ):
__UpperCAmelCase : Any = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(__lowercase , run_time=1.5 ) )
self.play(*__lowercase )
self.play(FadeOut(__lowercase ) )
__UpperCAmelCase : Tuple = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowercase , run_time=3 ) )
self.play(
FadeOut(__lowercase , __lowercase , *__lowercase , *__lowercase ) , )
self.wait()
| 63 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
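# Usage sketch: a shell invocation such as `transformers-cli env` is routed by the
# subparser wiring below to EnvironmentCommand; args.func then builds the command
# object and its run() method is called.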
def lowerCamelCase__ ( ):
__UpperCAmelCase : Union[str, Any] = ArgumentParser("""Transformers CLI tool""" , usage="""transformers-cli <command> [<args>]""" )
__UpperCAmelCase : Any = parser.add_subparsers(help="""transformers-cli command helpers""" )
# Register commands
ConvertCommand.register_subcommand(__lowerCamelCase )
DownloadCommand.register_subcommand(__lowerCamelCase )
EnvironmentCommand.register_subcommand(__lowerCamelCase )
RunCommand.register_subcommand(__lowerCamelCase )
ServeCommand.register_subcommand(__lowerCamelCase )
UserCommands.register_subcommand(__lowerCamelCase )
AddNewModelCommand.register_subcommand(__lowerCamelCase )
AddNewModelLikeCommand.register_subcommand(__lowerCamelCase )
LfsCommands.register_subcommand(__lowerCamelCase )
PTtoTFCommand.register_subcommand(__lowerCamelCase )
# Let's go
__UpperCAmelCase : Optional[Any] = parser.parse_args()
if not hasattr(__lowerCamelCase , """func""" ):
parser.print_help()
exit(1 )
# Run
__UpperCAmelCase : Tuple = args.func(__lowerCamelCase )
service.run()
if __name__ == "__main__":
main()
| 63 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a : Tuple = {
"configuration_blenderbot_small": [
"BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotSmallConfig",
"BlenderbotSmallOnnxConfig",
],
"tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[Any] = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[Any] = [
"BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotSmallForCausalLM",
"BlenderbotSmallForConditionalGeneration",
"BlenderbotSmallModel",
"BlenderbotSmallPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : str = [
"TFBlenderbotSmallForConditionalGeneration",
"TFBlenderbotSmallModel",
"TFBlenderbotSmallPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : str = [
"FlaxBlenderbotSmallForConditionalGeneration",
"FlaxBlenderbotSmallModel",
"FlaxBlenderbotSmallPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
a : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
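# Lazy-import sketch: each try/except above registers names only when the optional
# backend (tokenizers, torch, tf, flax) can be imported, and outside TYPE_CHECKING
# the module is replaced by a _LazyModule, so the heavy framework imports run on
# first attribute access instead of at package import time.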
| 63 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a : List[str] = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Dict = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[Any] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[Any] = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
a : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 63 | 1 |
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def lowerCamelCase__ ( __lowerCamelCase : Any ): # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def lowerCamelCase__ ( ):
with parallel_backend("""spark""" ):
assert ParallelBackendConfig.backend_name == "spark"
__UpperCAmelCase : List[Any] = [1, 2, 3]
with pytest.raises(__lowerCamelCase ):
with parallel_backend("""unsupported backend""" ):
map_nested(__lowerCamelCase , __lowerCamelCase , num_proc=2 )
with pytest.raises(__lowerCamelCase ):
with parallel_backend("""unsupported backend""" ):
map_nested(__lowerCamelCase , __lowerCamelCase , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("""num_proc""" , [2, -1] )
def lowerCamelCase__ ( __lowerCamelCase : Union[str, Any] ):
__UpperCAmelCase : int = [1, 2]
__UpperCAmelCase : Dict = {"""a""": 1, """b""": 2}
__UpperCAmelCase : Optional[int] = {"""a""": [1, 2], """b""": [3, 4]}
__UpperCAmelCase : str = {"""a""": {"""1""": 1}, """b""": 2}
__UpperCAmelCase : Any = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4}
__UpperCAmelCase : Tuple = [2, 3]
__UpperCAmelCase : List[str] = {"""a""": 2, """b""": 3}
__UpperCAmelCase : List[Any] = {"""a""": [2, 3], """b""": [4, 5]}
__UpperCAmelCase : str = {"""a""": {"""1""": 2}, """b""": 3}
__UpperCAmelCase : Tuple = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5}
with parallel_backend("""spark""" ):
assert map_nested(__lowerCamelCase , __lowerCamelCase , num_proc=__lowerCamelCase ) == expected_map_nested_sa
assert map_nested(__lowerCamelCase , __lowerCamelCase , num_proc=__lowerCamelCase ) == expected_map_nested_sa
assert map_nested(__lowerCamelCase , __lowerCamelCase , num_proc=__lowerCamelCase ) == expected_map_nested_sa
assert map_nested(__lowerCamelCase , __lowerCamelCase , num_proc=__lowerCamelCase ) == expected_map_nested_sa
assert map_nested(__lowerCamelCase , __lowerCamelCase , num_proc=__lowerCamelCase ) == expected_map_nested_sa
| 63 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
a : str = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Any = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
a : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 | 1 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
a : Dict = logging.get_logger(__name__)
a : List[Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
a : Tuple = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
a : Any = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
a : Dict = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
a : Optional[int] = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
a : int = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
a : Any = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
a : Optional[int] = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
a : List[str] = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
a : List[str] = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class a ( lowercase__ ):
"""simple docstring"""
a : Dict = VOCAB_FILES_NAMES
a : Tuple = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
a : Tuple = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : Any = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
a : str = DPRContextEncoderTokenizer
class a ( lowercase__ ):
"""simple docstring"""
a : Union[str, Any] = VOCAB_FILES_NAMES
a : List[Any] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
a : List[str] = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : Dict = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
a : List[Any] = DPRQuestionEncoderTokenizer
a : Optional[int] = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
a : Dict = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
a : Tuple = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(lowercase__ )
class a :
"""simple docstring"""
def __call__( self : Optional[Any] , __lowercase : Union[str, Any] , __lowercase : Optional[str] = None , __lowercase : Optional[str] = None , __lowercase : Union[bool, str] = False , __lowercase : Union[bool, str] = False , __lowercase : Optional[int] = None , __lowercase : Optional[Union[str, TensorType]] = None , __lowercase : Optional[bool] = None , **__lowercase : List[Any] , ) -> BatchEncoding:
if titles is None and texts is None:
return super().__call__(
__lowercase , padding=__lowercase , truncation=__lowercase , max_length=__lowercase , return_tensors=__lowercase , return_attention_mask=__lowercase , **__lowercase , )
elif titles is None or texts is None:
__UpperCAmelCase : Optional[int] = titles if texts is None else texts
return super().__call__(
__lowercase , __lowercase , padding=__lowercase , truncation=__lowercase , max_length=__lowercase , return_tensors=__lowercase , return_attention_mask=__lowercase , **__lowercase , )
__UpperCAmelCase : Optional[int] = titles if not isinstance(__lowercase , __lowercase ) else [titles]
__UpperCAmelCase : Dict = texts if not isinstance(__lowercase , __lowercase ) else [texts]
__UpperCAmelCase : Union[str, Any] = len(__lowercase )
__UpperCAmelCase : Dict = questions if not isinstance(__lowercase , __lowercase ) else [questions] * n_passages
assert len(__lowercase ) == len(
__lowercase ), f"""There should be as many titles than texts but got {len(__lowercase )} titles and {len(__lowercase )} texts."""
__UpperCAmelCase : Union[str, Any] = super().__call__(__lowercase , __lowercase , padding=__lowercase , truncation=__lowercase )["""input_ids"""]
__UpperCAmelCase : Any = super().__call__(__lowercase , add_special_tokens=__lowercase , padding=__lowercase , truncation=__lowercase )["""input_ids"""]
__UpperCAmelCase : Union[str, Any] = {
"""input_ids""": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(__lowercase , __lowercase )
]
}
if return_attention_mask is not False:
__UpperCAmelCase : Tuple = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
__UpperCAmelCase : int = attention_mask
return self.pad(__lowercase , padding=__lowercase , max_length=__lowercase , return_tensors=__lowercase )
def UpperCAmelCase ( self : Union[str, Any] , __lowercase : BatchEncoding , __lowercase : DPRReaderOutput , __lowercase : int = 16 , __lowercase : int = 64 , __lowercase : int = 4 , ) -> List[DPRSpanPrediction]:
__UpperCAmelCase : List[Any] = reader_input["""input_ids"""]
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = reader_output[:3]
__UpperCAmelCase : Any = len(__lowercase )
__UpperCAmelCase : Union[str, Any] = sorted(range(__lowercase ) , reverse=__lowercase , key=relevance_logits.__getitem__ )
__UpperCAmelCase : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
__UpperCAmelCase : List[Any] = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
__UpperCAmelCase : Tuple = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
__UpperCAmelCase : Dict = sequence_ids.index(self.pad_token_id )
else:
__UpperCAmelCase : Any = len(__lowercase )
__UpperCAmelCase : str = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=__lowercase , top_spans=__lowercase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=__lowercase , start_index=__lowercase , end_index=__lowercase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(__lowercase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def UpperCAmelCase ( self : Union[str, Any] , __lowercase : List[int] , __lowercase : List[int] , __lowercase : int , __lowercase : int , ) -> List[DPRSpanPrediction]:
__UpperCAmelCase : Union[str, Any] = []
for start_index, start_score in enumerate(__lowercase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
__UpperCAmelCase : Tuple = sorted(__lowercase , key=lambda __lowercase : x[1] , reverse=__lowercase )
__UpperCAmelCase : List[Any] = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, f"""Wrong span indices: [{start_index}:{end_index}]"""
__UpperCAmelCase : Optional[Any] = end_index - start_index + 1
assert length <= max_answer_length, f"""Span is too long: {length} > {max_answer_length}"""
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(__lowercase ) == top_spans:
break
return chosen_span_intervals
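    # Span-selection sketch: every candidate span is scored as
    # start_logit + end_logit, candidates are sorted by score in descending order,
    # and spans overlapping an already-chosen interval are skipped, yielding the
    # `top_spans` best non-overlapping answer spans.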
@add_end_docstrings(lowercase__ )
class a ( lowercase__ , lowercase__ ):
"""simple docstring"""
a : Optional[int] = VOCAB_FILES_NAMES
a : Optional[Any] = READER_PRETRAINED_VOCAB_FILES_MAP
a : int = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : List[Any] = READER_PRETRAINED_INIT_CONFIGURATION
a : List[Any] = ['input_ids', 'attention_mask']
a : Optional[Any] = DPRReaderTokenizer
| 63 |
def lowerCamelCase__ ( __lowerCamelCase : int ):
if num <= 0:
raise ValueError("""Input must be a positive integer""" )
__UpperCAmelCase : int = [True] * (num + 1)
__UpperCAmelCase : Tuple = 2
while p * p <= num:
if primes[p]:
for i in range(p * p , num + 1 , __lowerCamelCase ):
__UpperCAmelCase : str = False
p += 1
return [prime for prime in range(2 , num + 1 ) if primes[prime]]
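# A minimal worked example: for num = 10 the sieve crosses out 4, 6, 8, 10 (p = 2)
# and 9 (p = 3), then stops since 4 * 4 > 10, returning [2, 3, 5, 7]; the whole
# procedure runs in O(n log log n) time and O(n) space.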
if __name__ == "__main__":
import doctest
doctest.testmod()
a : Any = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
| 63 | 1 |
import re
from filelock import FileLock
try:
import nltk
a : Optional[int] = True
except (ImportError, ModuleNotFoundError):
a : Union[str, Any] = False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def lowerCamelCase__ ( __lowerCamelCase : str ):
re.sub("""<n>""" , """""" , __lowerCamelCase ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(__lowerCamelCase ) )
| 63 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : str = logging.get_logger(__name__)
a : Union[str, Any] = {
"microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class a ( lowercase__ ):
"""simple docstring"""
a : Optional[int] = 'git_vision_model'
def __init__( self : str , __lowercase : List[str]=768 , __lowercase : List[str]=3072 , __lowercase : List[Any]=12 , __lowercase : Dict=12 , __lowercase : int=3 , __lowercase : Any=224 , __lowercase : Optional[int]=16 , __lowercase : Dict="quick_gelu" , __lowercase : Any=1e-5 , __lowercase : str=0.0 , __lowercase : int=0.02 , **__lowercase : int , ) -> List[str]:
super().__init__(**__lowercase )
__UpperCAmelCase : int = hidden_size
__UpperCAmelCase : List[Any] = intermediate_size
__UpperCAmelCase : Tuple = num_hidden_layers
__UpperCAmelCase : Dict = num_attention_heads
__UpperCAmelCase : int = num_channels
__UpperCAmelCase : str = patch_size
__UpperCAmelCase : Tuple = image_size
__UpperCAmelCase : int = initializer_range
__UpperCAmelCase : Tuple = attention_dropout
__UpperCAmelCase : Optional[int] = layer_norm_eps
__UpperCAmelCase : List[Any] = hidden_act
@classmethod
def UpperCAmelCase ( cls : Dict , __lowercase : Union[str, os.PathLike] , **__lowercase : List[str] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__lowercase )
__UpperCAmelCase , __UpperCAmelCase : List[Any] = cls.get_config_dict(__lowercase , **__lowercase )
# get the vision config dict if we are loading from GITConfig
if config_dict.get("""model_type""" ) == "git":
__UpperCAmelCase : str = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(__lowercase , **__lowercase )
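    # Loading sketch: when the checkpoint is a composite "git" config, only its
    # nested "vision_config" sub-dict is used to build this vision config, and a
    # warning is logged if the stored model_type disagrees with this class.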
class a ( lowercase__ ):
"""simple docstring"""
a : List[str] = 'git'
def __init__( self : Optional[int] , __lowercase : List[Any]=None , __lowercase : Tuple=30522 , __lowercase : str=768 , __lowercase : Optional[int]=6 , __lowercase : Union[str, Any]=12 , __lowercase : Optional[int]=3072 , __lowercase : List[str]="gelu" , __lowercase : Tuple=0.1 , __lowercase : Optional[int]=0.1 , __lowercase : Optional[int]=1024 , __lowercase : Union[str, Any]=0.02 , __lowercase : Optional[Any]=1e-1_2 , __lowercase : List[Any]=0 , __lowercase : Dict="absolute" , __lowercase : Dict=True , __lowercase : Any=False , __lowercase : Optional[int]=101 , __lowercase : str=102 , __lowercase : Union[str, Any]=None , **__lowercase : Dict , ) -> Tuple:
super().__init__(bos_token_id=__lowercase , eos_token_id=__lowercase , pad_token_id=__lowercase , **__lowercase )
if vision_config is None:
__UpperCAmelCase : Optional[int] = {}
logger.info("""vision_config is None. initializing the GitVisionConfig with default values.""" )
__UpperCAmelCase : Tuple = GitVisionConfig(**__lowercase )
__UpperCAmelCase : Dict = vocab_size
__UpperCAmelCase : Union[str, Any] = hidden_size
__UpperCAmelCase : Tuple = num_hidden_layers
__UpperCAmelCase : Optional[Any] = num_attention_heads
__UpperCAmelCase : List[str] = hidden_act
__UpperCAmelCase : str = intermediate_size
__UpperCAmelCase : Tuple = hidden_dropout_prob
__UpperCAmelCase : Dict = attention_probs_dropout_prob
__UpperCAmelCase : int = max_position_embeddings
__UpperCAmelCase : str = initializer_range
__UpperCAmelCase : str = layer_norm_eps
__UpperCAmelCase : Union[str, Any] = position_embedding_type
__UpperCAmelCase : Dict = use_cache
__UpperCAmelCase : int = tie_word_embeddings
__UpperCAmelCase : Optional[int] = num_image_with_embedding
__UpperCAmelCase : Optional[int] = bos_token_id
__UpperCAmelCase : List[Any] = eos_token_id
def UpperCAmelCase ( self : str ) -> int:
__UpperCAmelCase : List[Any] = copy.deepcopy(self.__dict__ )
__UpperCAmelCase : List[str] = self.vision_config.to_dict()
__UpperCAmelCase : Union[str, Any] = self.__class__.model_type
return output
| 63 | 1 |
a : List[str] = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
a : Tuple = [{"type": "code", "content": INSTALL_CONTENT}]
a : List[Any] = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 63 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
a : Optional[Any] = BarthezTokenizer
a : Any = BarthezTokenizerFast
a : Union[str, Any] = True
a : Union[str, Any] = True
def UpperCAmelCase ( self : Dict ) -> Any:
super().setUp()
__UpperCAmelCase : Optional[int] = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""" )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=__lowercase )
__UpperCAmelCase : str = tokenizer
def UpperCAmelCase ( self : Optional[int] ) -> Tuple:
__UpperCAmelCase : Dict = """<pad>"""
__UpperCAmelCase : Optional[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowercase ) , __lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowercase ) , __lowercase )
def UpperCAmelCase ( self : List[Any] ) -> str:
__UpperCAmelCase : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(__lowercase ) , 101122 )
def UpperCAmelCase ( self : Any ) -> List[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 101122 )
@require_torch
def UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
__UpperCAmelCase : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
__UpperCAmelCase : str = [0, 57, 3018, 70307, 91, 2]
__UpperCAmelCase : List[Any] = self.tokenizer(
__lowercase , max_length=len(__lowercase ) , padding=__lowercase , truncation=__lowercase , return_tensors="""pt""" )
self.assertIsInstance(__lowercase , __lowercase )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
__UpperCAmelCase : int = batch.input_ids.tolist()[0]
self.assertListEqual(__lowercase , __lowercase )
def UpperCAmelCase ( self : List[Any] ) -> Tuple:
if not self.test_rust_tokenizer:
return
__UpperCAmelCase : Union[str, Any] = self.get_tokenizer()
__UpperCAmelCase : Optional[Any] = self.get_rust_tokenizer()
__UpperCAmelCase : int = """I was born in 92000, and this is falsé."""
__UpperCAmelCase : Union[str, Any] = tokenizer.tokenize(__lowercase )
__UpperCAmelCase : List[Any] = rust_tokenizer.tokenize(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
__UpperCAmelCase : Dict = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
__UpperCAmelCase : List[Any] = rust_tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
self.assertListEqual(__lowercase , __lowercase )
__UpperCAmelCase : Dict = self.get_rust_tokenizer()
__UpperCAmelCase : str = tokenizer.encode(__lowercase )
__UpperCAmelCase : Tuple = rust_tokenizer.encode(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
@slow
def UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
# fmt: off
__UpperCAmelCase : str = {"""input_ids""": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # moussaKam/mbarthez is a French model, so we also use French texts.
__UpperCAmelCase : int = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=__lowercase , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=__lowercase , )
| 63 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
a : str = StableDiffusionXLImgaImgPipeline
a : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
a : Optional[int] = PipelineTesterMixin.required_optional_params - {'latents'}
a : str = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
a : Dict = IMAGE_TO_IMAGE_IMAGE_PARAMS
a : Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def UpperCAmelCase ( self : int ) -> List[str]:
torch.manual_seed(0 )
__UpperCAmelCase : List[str] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , attention_head_dim=(2, 4) , use_linear_projection=__lowercase , addition_embed_type="""text_time""" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
__UpperCAmelCase : Optional[Any] = EulerDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , steps_offset=1 , beta_schedule="""scaled_linear""" , timestep_spacing="""leading""" , )
torch.manual_seed(0 )
__UpperCAmelCase : Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__UpperCAmelCase : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=32 , )
__UpperCAmelCase : Optional[int] = CLIPTextModel(__lowercase )
__UpperCAmelCase : Dict = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__lowercase )
__UpperCAmelCase : List[str] = CLIPTextModelWithProjection(__lowercase )
__UpperCAmelCase : Optional[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__lowercase )
__UpperCAmelCase : List[Any] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""text_encoder_2""": text_encoder_a,
"""tokenizer_2""": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
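        # Note: every sub-model above is tiny and initialised under
        # torch.manual_seed(0), so the fast pipeline tests below are deterministic
        # and cheap enough to run on CPU.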
def UpperCAmelCase ( self : Tuple , __lowercase : List[str] , __lowercase : Union[str, Any]=0 ) -> str:
__UpperCAmelCase : List[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowercase ) ).to(__lowercase )
__UpperCAmelCase : List[Any] = image / 2 + 0.5
if str(__lowercase ).startswith("""mps""" ):
__UpperCAmelCase : Tuple = torch.manual_seed(__lowercase )
else:
__UpperCAmelCase : Any = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
__UpperCAmelCase : Union[str, Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 5.0,
"""output_type""": """numpy""",
"""strength""": 0.75,
}
return inputs
def UpperCAmelCase ( self : Any ) -> Optional[Any]:
__UpperCAmelCase : str = """cpu""" # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase : Union[str, Any] = self.get_dummy_components()
__UpperCAmelCase : Tuple = StableDiffusionXLImgaImgPipeline(**__lowercase )
__UpperCAmelCase : Dict = sd_pipe.to(__lowercase )
sd_pipe.set_progress_bar_config(disable=__lowercase )
__UpperCAmelCase : Optional[int] = self.get_dummy_inputs(__lowercase )
__UpperCAmelCase : Union[str, Any] = sd_pipe(**__lowercase ).images
__UpperCAmelCase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__UpperCAmelCase : str = np.array([0.4_656, 0.4_840, 0.4_439, 0.6_698, 0.5_574, 0.4_524, 0.5_799, 0.5_943, 0.5_165] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def UpperCAmelCase ( self : List[Any] ) -> Optional[int]:
pass
def UpperCAmelCase ( self : Dict ) -> Dict:
__UpperCAmelCase : Optional[int] = self.get_dummy_components()
__UpperCAmelCase : List[str] = StableDiffusionXLImgaImgPipeline(**__lowercase )
__UpperCAmelCase : List[str] = sd_pipe.to(__lowercase )
__UpperCAmelCase : str = sd_pipe.to(__lowercase )
sd_pipe.set_progress_bar_config(disable=__lowercase )
# forward without prompt embeds
__UpperCAmelCase : List[str] = self.get_dummy_inputs(__lowercase )
__UpperCAmelCase : Tuple = 3 * ["""this is a negative prompt"""]
__UpperCAmelCase : Union[str, Any] = negative_prompt
__UpperCAmelCase : Dict = 3 * [inputs["""prompt"""]]
__UpperCAmelCase : Union[str, Any] = sd_pipe(**__lowercase )
__UpperCAmelCase : Dict = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
__UpperCAmelCase : int = self.get_dummy_inputs(__lowercase )
__UpperCAmelCase : Optional[Any] = 3 * ["""this is a negative prompt"""]
__UpperCAmelCase : Union[str, Any] = 3 * [inputs.pop("""prompt""" )]
(
(
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) ,
) : List[Any] = sd_pipe.encode_prompt(__lowercase , negative_prompt=__lowercase )
__UpperCAmelCase : Optional[Any] = sd_pipe(
**__lowercase , prompt_embeds=__lowercase , negative_prompt_embeds=__lowercase , pooled_prompt_embeds=__lowercase , negative_pooled_prompt_embeds=__lowercase , )
__UpperCAmelCase : Optional[Any] = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def UpperCAmelCase ( self : Any , __lowercase : Dict , __lowercase : Union[str, Any]="cpu" , __lowercase : List[str]=torch.float32 , __lowercase : List[str]=0 ) -> Any:
__UpperCAmelCase : int = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
__UpperCAmelCase : Tuple = np.random.RandomState(__lowercase ).standard_normal((1, 4, 64, 64) )
__UpperCAmelCase : str = torch.from_numpy(__lowercase ).to(device=__lowercase , dtype=__lowercase )
__UpperCAmelCase : Tuple = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase ( self : Dict ) -> Dict:
__UpperCAmelCase : Optional[Any] = DiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-base""" )
pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
__UpperCAmelCase : int = self.get_inputs(__lowercase )
__UpperCAmelCase : Tuple = pipe(**__lowercase ).images
__UpperCAmelCase : Optional[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__UpperCAmelCase : Tuple = np.array([0.49_493, 0.47_896, 0.40_798, 0.54_214, 0.53_212, 0.48_202, 0.47_656, 0.46_329, 0.48_506] )
assert np.abs(image_slice - expected_slice ).max() < 7e-3
| 63 |
from __future__ import annotations
import math
def lowerCamelCase__ ( __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : bool , __lowerCamelCase : list[int] , __lowerCamelCase : float ):
if depth < 0:
raise ValueError("""Depth cannot be less than 0""" )
if len(__lowerCamelCase ) == 0:
raise ValueError("""Scores cannot be empty""" )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , minimax(depth + 1 , node_index * 2 + 1 , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , )
return min(
minimax(depth + 1 , node_index * 2 , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , minimax(depth + 1 , node_index * 2 + 1 , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , )
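# Worked example for the standard minimax recursion (where is_max alternates at
# each level between the maximising and minimising player): with scores
# [3, 5, 2, 9] and height 2, the root value is max(min(3, 5), min(2, 9)) = 3.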
def lowerCamelCase__ ( ):
__UpperCAmelCase : Tuple = [90, 23, 6, 33, 21, 65, 123, 34423]
__UpperCAmelCase : str = math.log(len(__lowerCamelCase ) , 2 )
print("""Optimal value : """ , end="""""" )
print(minimax(0 , 0 , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 63 | 1 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class a :
"""simple docstring"""
def __init__( self : Dict , __lowercase : Any , __lowercase : List[str]=2 , __lowercase : List[str]=True , __lowercase : Dict=False , __lowercase : Optional[int]=10 , __lowercase : str=3 , __lowercase : Any=32 * 4 , __lowercase : Optional[Any]=32 * 6 , __lowercase : List[str]=4 , __lowercase : Optional[int]=32 , ) -> str:
__UpperCAmelCase : Optional[Any] = parent
__UpperCAmelCase : List[str] = batch_size
__UpperCAmelCase : str = is_training
__UpperCAmelCase : List[Any] = use_auxiliary_loss
__UpperCAmelCase : Any = num_queries
__UpperCAmelCase : Optional[int] = num_channels
__UpperCAmelCase : Union[str, Any] = min_size
__UpperCAmelCase : Tuple = max_size
__UpperCAmelCase : Optional[Any] = num_labels
__UpperCAmelCase : List[str] = mask_feature_size
def UpperCAmelCase ( self : Tuple ) -> List[str]:
__UpperCAmelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__lowercase )
__UpperCAmelCase : List[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowercase )
__UpperCAmelCase : Optional[Any] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__lowercase ) > 0.5
).float()
__UpperCAmelCase : List[str] = (torch.rand((self.batch_size, self.num_labels) , device=__lowercase ) > 0.5).long()
__UpperCAmelCase : Dict = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def UpperCAmelCase ( self : str ) -> Optional[int]:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = self.prepare_config_and_inputs()
__UpperCAmelCase : Optional[int] = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
def UpperCAmelCase ( self : List[str] , __lowercase : Dict , __lowercase : int ) -> List[str]:
__UpperCAmelCase : Dict = output.encoder_hidden_states
__UpperCAmelCase : Union[str, Any] = output.pixel_decoder_hidden_states
__UpperCAmelCase : List[str] = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__lowercase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__lowercase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__lowercase ) , config.decoder_config.decoder_layers )
def UpperCAmelCase ( self : Union[str, Any] , __lowercase : Any , __lowercase : Union[str, Any] , __lowercase : Union[str, Any] , __lowercase : int=False ) -> Tuple:
with torch.no_grad():
__UpperCAmelCase : Union[str, Any] = MaskFormerModel(config=__lowercase )
model.to(__lowercase )
model.eval()
__UpperCAmelCase : Optional[Any] = model(pixel_values=__lowercase , pixel_mask=__lowercase )
__UpperCAmelCase : str = model(__lowercase , output_hidden_states=__lowercase )
# the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__lowercase , __lowercase )
def UpperCAmelCase ( self : Tuple , __lowercase : List[str] , __lowercase : Optional[Any] , __lowercase : Optional[Any] , __lowercase : Any , __lowercase : Any ) -> Optional[Any]:
__UpperCAmelCase : Optional[Any] = MaskFormerForInstanceSegmentation(config=__lowercase )
model.to(__lowercase )
model.eval()
def comm_check_on_output(__lowercase : str ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have 1/4 of the input spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
__UpperCAmelCase : str = model(pixel_values=__lowercase , pixel_mask=__lowercase )
__UpperCAmelCase : Union[str, Any] = model(__lowercase )
comm_check_on_output(__lowercase )
__UpperCAmelCase : int = model(
pixel_values=__lowercase , pixel_mask=__lowercase , mask_labels=__lowercase , class_labels=__lowercase )
comm_check_on_output(__lowercase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class a ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
a : Optional[Any] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
a : List[str] = (
{'feature-extraction': MaskFormerModel, 'image-segmentation': MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
a : Dict = False
a : Any = False
a : Optional[Any] = False
a : Dict = False
def UpperCAmelCase ( self : Any ) -> Dict:
__UpperCAmelCase : Dict = MaskFormerModelTester(self )
__UpperCAmelCase : str = ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase )
def UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__lowercase , **__lowercase , output_hidden_states=__lowercase )
def UpperCAmelCase ( self : Any ) -> str:
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowercase )
@unittest.skip(reason="""MaskFormer does not use inputs_embeds""" )
def UpperCAmelCase ( self : Tuple ) -> Optional[int]:
pass
@unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""" )
def UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
pass
@unittest.skip(reason="""MaskFormer is not a generative model""" )
def UpperCAmelCase ( self : Union[str, Any] ) -> int:
pass
@unittest.skip(reason="""MaskFormer does not use token embeddings""" )
def UpperCAmelCase ( self : Optional[int] ) -> int:
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCAmelCase ( self : Union[str, Any] ) -> int:
pass
def UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
__UpperCAmelCase , __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : str = model_class(__lowercase )
__UpperCAmelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : int = [*signature.parameters.keys()]
__UpperCAmelCase : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __lowercase )
@slow
def UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
for model_name in ["facebook/maskformer-swin-small-coco"]:
__UpperCAmelCase : str = MaskFormerModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
def UpperCAmelCase ( self : Union[str, Any] ) -> Any:
__UpperCAmelCase : int = (self.model_tester.min_size,) * 2
__UpperCAmelCase : Union[str, Any] = {
"""pixel_values""": torch.randn((2, 3, *size) , device=__lowercase ),
"""mask_labels""": torch.randn((2, 10, *size) , device=__lowercase ),
"""class_labels""": torch.zeros(2 , 10 , device=__lowercase ).long(),
}
__UpperCAmelCase : List[Any] = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__lowercase )
__UpperCAmelCase : Dict = model(**__lowercase )
self.assertTrue(outputs.loss is not None )
def UpperCAmelCase ( self : List[Any] ) -> Optional[int]:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__lowercase , **__lowercase , output_hidden_states=__lowercase )
def UpperCAmelCase ( self : Any ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Dict = model_class(__lowercase ).to(__lowercase )
__UpperCAmelCase : Optional[int] = model(**__lowercase , output_attentions=__lowercase )
self.assertTrue(outputs.attentions is not None )
def UpperCAmelCase ( self : int ) -> Any:
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
__UpperCAmelCase : Optional[int] = self.all_model_classes[1]
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
__UpperCAmelCase : Union[str, Any] = model_class(__lowercase )
model.to(__lowercase )
model.train()
__UpperCAmelCase : Any = model(__lowercase , mask_labels=__lowercase , class_labels=__lowercase ).loss
loss.backward()
def UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
# only MaskFormerForInstanceSegmentation has the loss
__UpperCAmelCase : List[Any] = self.all_model_classes[1]
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
__UpperCAmelCase : Optional[int] = True
__UpperCAmelCase : Optional[int] = True
__UpperCAmelCase : List[Any] = model_class(__lowercase )
model.to(__lowercase )
model.train()
__UpperCAmelCase : Optional[int] = model(__lowercase , mask_labels=__lowercase , class_labels=__lowercase )
__UpperCAmelCase : List[str] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
__UpperCAmelCase : Any = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
__UpperCAmelCase : str = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
__UpperCAmelCase : Dict = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__lowercase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
a : Dict = 1e-4
def lowerCamelCase__ ( ):
__UpperCAmelCase : Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@slow
class a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase ( self : str ) -> Optional[int]:
return (
MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" )
if is_vision_available()
else None
)
def UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
__UpperCAmelCase : List[Any] = MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(__lowercase )
__UpperCAmelCase : int = self.default_image_processor
__UpperCAmelCase : Union[str, Any] = prepare_img()
__UpperCAmelCase : Union[str, Any] = image_processor(__lowercase , return_tensors="""pt""" ).to(__lowercase )
__UpperCAmelCase : Union[str, Any] = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__lowercase , (1, 3, 800, 1088) )
with torch.no_grad():
__UpperCAmelCase : Dict = model(**__lowercase )
__UpperCAmelCase : Union[str, Any] = torch.tensor(
[[-0.0_482, 0.9_228, 0.4_951], [-0.2_547, 0.8_017, 0.8_527], [-0.0_069, 0.3_385, -0.0_089]] ).to(__lowercase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
__UpperCAmelCase : Dict = torch.tensor(
[[-0.8_422, -0.8_434, -0.9_718], [-1.0_144, -0.5_565, -0.4_195], [-1.0_038, -0.4_484, -0.1_961]] ).to(__lowercase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
__UpperCAmelCase : Any = torch.tensor(
[[0.2_852, -0.0_159, 0.9_735], [0.6_254, 0.1_858, 0.8_529], [-0.0_680, -0.4_116, 1.8_413]] ).to(__lowercase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowercase , atol=__lowercase ) )
def UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
__UpperCAmelCase : Optional[int] = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(__lowercase )
.eval()
)
__UpperCAmelCase : Optional[Any] = self.default_image_processor
__UpperCAmelCase : Optional[Any] = prepare_img()
__UpperCAmelCase : List[str] = image_processor(__lowercase , return_tensors="""pt""" ).to(__lowercase )
__UpperCAmelCase : Any = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__lowercase , (1, 3, 800, 1088) )
with torch.no_grad():
__UpperCAmelCase : str = model(**__lowercase )
# masks_queries_logits
__UpperCAmelCase : Dict = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
__UpperCAmelCase : str = [
[-1.3_737_124, -1.7_724_937, -1.9_364_233],
[-1.5_977_281, -1.9_867_939, -2.1_523_695],
[-1.5_795_398, -1.9_269_832, -2.093_942],
]
__UpperCAmelCase : Dict = torch.tensor(__lowercase ).to(__lowercase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
# class_queries_logits
__UpperCAmelCase : Optional[Any] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
__UpperCAmelCase : Optional[Any] = torch.tensor(
[
[1.6_5_1_2e0_0, -5.2_5_7_2e0_0, -3.3_5_1_9e0_0],
[3.6_1_6_9e-0_2, -5.9_0_2_5e0_0, -2.9_3_1_3e0_0],
[1.0_7_6_6e-0_4, -7.7_6_3_0e0_0, -5.1_2_6_3e0_0],
] ).to(__lowercase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowercase , atol=__lowercase ) )
def UpperCAmelCase ( self : Any ) -> Tuple:
__UpperCAmelCase : Union[str, Any] = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""" )
.to(__lowercase )
.eval()
)
__UpperCAmelCase : Any = self.default_image_processor
__UpperCAmelCase : Any = prepare_img()
__UpperCAmelCase : Any = image_processor(__lowercase , return_tensors="""pt""" ).to(__lowercase )
__UpperCAmelCase : List[Any] = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__lowercase , (1, 3, 800, 1088) )
with torch.no_grad():
__UpperCAmelCase : int = model(**__lowercase )
# masks_queries_logits
__UpperCAmelCase : List[str] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
__UpperCAmelCase : Tuple = [[-0.9_046, -2.6_366, -4.6_062], [-3.4_179, -5.7_890, -8.8_057], [-4.9_179, -7.6_560, -10.7_711]]
__UpperCAmelCase : Dict = torch.tensor(__lowercase ).to(__lowercase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
# class_queries_logits
__UpperCAmelCase : str = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
__UpperCAmelCase : Union[str, Any] = torch.tensor(
[[4.7_188, -3.2_585, -2.8_857], [6.6_871, -2.9_181, -1.2_487], [7.2_449, -2.2_764, -2.1_874]] ).to(__lowercase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowercase , atol=__lowercase ) )
def UpperCAmelCase ( self : Optional[int] ) -> Dict:
__UpperCAmelCase : Any = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(__lowercase )
.eval()
)
__UpperCAmelCase : List[str] = self.default_image_processor
__UpperCAmelCase : List[str] = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="""pt""" , )
__UpperCAmelCase : int = inputs["""pixel_values"""].to(__lowercase )
__UpperCAmelCase : str = [el.to(__lowercase ) for el in inputs["""mask_labels"""]]
__UpperCAmelCase : Optional[Any] = [el.to(__lowercase ) for el in inputs["""class_labels"""]]
with torch.no_grad():
__UpperCAmelCase : Union[str, Any] = model(**__lowercase )
self.assertTrue(outputs.loss is not None )
| 63 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : Tuple = logging.get_logger(__name__)
a : List[str] = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class a ( lowercase__ ):
"""simple docstring"""
a : Optional[Any] = 'openai-gpt'
a : List[Any] = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : Union[str, Any] , __lowercase : Tuple=40478 , __lowercase : Tuple=512 , __lowercase : int=768 , __lowercase : Dict=12 , __lowercase : Union[str, Any]=12 , __lowercase : Optional[Any]="gelu" , __lowercase : Optional[Any]=0.1 , __lowercase : Optional[Any]=0.1 , __lowercase : Tuple=0.1 , __lowercase : Dict=1e-5 , __lowercase : Any=0.02 , __lowercase : List[str]="cls_index" , __lowercase : str=True , __lowercase : Dict=None , __lowercase : str=True , __lowercase : List[str]=0.1 , **__lowercase : List[Any] , ) -> List[Any]:
__UpperCAmelCase : Optional[int] = vocab_size
__UpperCAmelCase : Optional[Any] = n_positions
__UpperCAmelCase : Optional[int] = n_embd
__UpperCAmelCase : str = n_layer
__UpperCAmelCase : Any = n_head
__UpperCAmelCase : Tuple = afn
__UpperCAmelCase : Any = resid_pdrop
__UpperCAmelCase : Union[str, Any] = embd_pdrop
__UpperCAmelCase : str = attn_pdrop
__UpperCAmelCase : str = layer_norm_epsilon
__UpperCAmelCase : Dict = initializer_range
__UpperCAmelCase : Optional[int] = summary_type
__UpperCAmelCase : Optional[Any] = summary_use_proj
__UpperCAmelCase : List[Any] = summary_activation
__UpperCAmelCase : Union[str, Any] = summary_first_dropout
__UpperCAmelCase : Dict = summary_proj_to_labels
super().__init__(**__lowercase )
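# Hedged usage sketch (assumes the upstream `transformers` class name
# `OpenAIGPTConfig`): the attribute_map above aliases the canonical config names
# to the GPT-specific ones, so both spellings read the same value:
#
#     from transformers import OpenAIGPTConfig
#     cfg = OpenAIGPTConfig(n_embd=768, n_head=12)
#     assert cfg.hidden_size == cfg.n_embd == 768
#     assert cfg.num_attention_heads == cfg.n_head == 12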
| 63 | 1 |
a : Union[str, Any] = [
"Audio",
"Array2D",
"Array3D",
"Array4D",
"Array5D",
"ClassLabel",
"Features",
"Sequence",
"Value",
"Image",
"Translation",
"TranslationVariableLanguages",
]
from .audio import Audio
from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 63 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
a : int = KandinskyVaaInpaintPipeline
a : Any = ['image_embeds', 'negative_image_embeds', 'image', 'mask_image']
a : Any = [
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
a : Any = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
a : List[Any] = False
@property
def UpperCAmelCase ( self : int ) -> Dict:
return 32
@property
def UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
return 32
@property
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
return self.time_input_dim
@property
def UpperCAmelCase ( self : str ) -> List[str]:
return self.time_input_dim * 4
@property
def UpperCAmelCase ( self : Tuple ) -> List[str]:
return 100
@property
def UpperCAmelCase ( self : Dict ) -> Any:
torch.manual_seed(0 )
__UpperCAmelCase : Optional[Any] = {
"""in_channels""": 9,
# Out channels is double the in channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
__UpperCAmelCase : int = UNetaDConditionModel(**__lowercase )
return model
@property
def UpperCAmelCase ( self : int ) -> int:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCAmelCase ( self : Dict ) -> List[str]:
torch.manual_seed(0 )
__UpperCAmelCase : List[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def UpperCAmelCase ( self : Any ) -> List[Any]:
__UpperCAmelCase : List[str] = self.dummy_unet
__UpperCAmelCase : List[str] = self.dummy_movq
__UpperCAmelCase : Optional[Any] = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule="""linear""" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=__lowercase , set_alpha_to_one=__lowercase , steps_offset=1 , prediction_type="""epsilon""" , thresholding=__lowercase , )
__UpperCAmelCase : str = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def UpperCAmelCase ( self : str , __lowercase : Tuple , __lowercase : List[str]=0 ) -> Optional[Any]:
__UpperCAmelCase : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowercase ) ).to(__lowercase )
__UpperCAmelCase : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__lowercase )
# create init_image
__UpperCAmelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowercase ) ).to(__lowercase )
__UpperCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__UpperCAmelCase : Union[str, Any] = Image.fromarray(np.uinta(__lowercase ) ).convert("""RGB""" ).resize((256, 256) )
# create mask
__UpperCAmelCase : Union[str, Any] = np.ones((64, 64) , dtype=np.floataa )
__UpperCAmelCase : List[str] = 0
if str(__lowercase ).startswith("""mps""" ):
__UpperCAmelCase : List[str] = torch.manual_seed(__lowercase )
else:
__UpperCAmelCase : Optional[int] = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
__UpperCAmelCase : Optional[Any] = {
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
__UpperCAmelCase : Optional[Any] = """cpu"""
__UpperCAmelCase : Dict = self.get_dummy_components()
__UpperCAmelCase : str = self.pipeline_class(**__lowercase )
__UpperCAmelCase : Tuple = pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
__UpperCAmelCase : List[Any] = pipe(**self.get_dummy_inputs(__lowercase ) )
__UpperCAmelCase : Tuple = output.images
__UpperCAmelCase : Optional[int] = pipe(
**self.get_dummy_inputs(__lowercase ) , return_dict=__lowercase , )[0]
__UpperCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
__UpperCAmelCase : str = image_from_tuple[0, -3:, -3:, -1]
print(f"""image.shape {image.shape}""" )
assert image.shape == (1, 64, 64, 3)
__UpperCAmelCase : Optional[Any] = np.array(
[0.50_775_903, 0.49_527_195, 0.48_824_543, 0.50_192_237, 0.48_644_906, 0.49_373_814, 0.4_780_598, 0.47_234_827, 0.48_327_848] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def UpperCAmelCase ( self : str ) -> Union[str, Any]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self : Union[str, Any] ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
__UpperCAmelCase : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy""" )
__UpperCAmelCase : List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
__UpperCAmelCase : List[Any] = np.ones((768, 768) , dtype=np.floataa )
__UpperCAmelCase : Optional[Any] = 0
__UpperCAmelCase : Tuple = """a hat"""
__UpperCAmelCase : str = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(__lowercase )
__UpperCAmelCase : Any = KandinskyVaaInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder-inpaint""" , torch_dtype=torch.floataa )
__UpperCAmelCase : int = pipeline.to(__lowercase )
pipeline.set_progress_bar_config(disable=__lowercase )
__UpperCAmelCase : int = torch.Generator(device="""cpu""" ).manual_seed(0 )
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = pipe_prior(
__lowercase , generator=__lowercase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
__UpperCAmelCase : Optional[int] = pipeline(
image=__lowercase , mask_image=__lowercase , image_embeds=__lowercase , negative_image_embeds=__lowercase , generator=__lowercase , num_inference_steps=100 , height=768 , width=768 , output_type="""np""" , )
__UpperCAmelCase : List[Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__lowercase , __lowercase )
| 63 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : Union[str, Any] = logging.get_logger(__name__)
a : int = {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class a ( lowercase__ ):
"""simple docstring"""
a : Dict = 'speech_to_text_2'
a : Optional[int] = ['past_key_values']
a : Union[str, Any] = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : List[str] , __lowercase : Union[str, Any]=10000 , __lowercase : List[Any]=6 , __lowercase : Tuple=2048 , __lowercase : int=4 , __lowercase : Dict=0.0 , __lowercase : int=True , __lowercase : Optional[int]="relu" , __lowercase : Optional[int]=256 , __lowercase : int=0.1 , __lowercase : Optional[int]=0.0 , __lowercase : Optional[Any]=0.0 , __lowercase : Union[str, Any]=0.02 , __lowercase : Any=2 , __lowercase : List[Any]=True , __lowercase : Tuple=1 , __lowercase : str=0 , __lowercase : Tuple=2 , __lowercase : List[Any]=1024 , **__lowercase : Tuple , ) -> List[Any]:
__UpperCAmelCase : Union[str, Any] = vocab_size
__UpperCAmelCase : int = d_model
__UpperCAmelCase : Any = decoder_ffn_dim
__UpperCAmelCase : List[Any] = decoder_layers
__UpperCAmelCase : Union[str, Any] = decoder_attention_heads
__UpperCAmelCase : List[str] = dropout
__UpperCAmelCase : List[Any] = attention_dropout
__UpperCAmelCase : Tuple = activation_dropout
__UpperCAmelCase : List[str] = activation_function
__UpperCAmelCase : Any = init_std
__UpperCAmelCase : str = decoder_layerdrop
__UpperCAmelCase : Tuple = use_cache
__UpperCAmelCase : Dict = decoder_layers
__UpperCAmelCase : str = scale_embedding # scale factor will be sqrt(d_model) if True
__UpperCAmelCase : str = max_target_positions
super().__init__(
pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase , decoder_start_token_id=__lowercase , **__lowercase , )
| 63 |
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
a : List[Any] = True
except ImportError:
a : str = False
try:
from torch.hub import _get_torch_home
a : List[Any] = _get_torch_home()
except ImportError:
a : int = os.path.expanduser(
os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
)
a : Optional[Any] = os.path.join(torch_cache_home, "transformers")
a : Optional[Any] = "https://cdn.huggingface.co"
a : List[str] = "https://s3.amazonaws.com/models.huggingface.co/bert"
a : Any = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
a : Optional[int] = os.path.join(PATH, "config.yaml")
a : Dict = os.path.join(PATH, "attributes.txt")
a : Tuple = os.path.join(PATH, "objects.txt")
a : Dict = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
a : Dict = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
a : Optional[int] = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
a : Any = "pytorch_model.bin"
a : int = "config.yaml"
def lowerCamelCase__ ( __lowerCamelCase : str=OBJECTS , __lowerCamelCase : Union[str, Any]=ATTRIBUTES ):
__UpperCAmelCase : Union[str, Any] = []
with open(__lowerCamelCase ) as f:
for object in f.readlines():
vg_classes.append(object.split(""",""" )[0].lower().strip() )
__UpperCAmelCase : Dict = []
with open(__lowerCamelCase ) as f:
for object in f.readlines():
vg_attrs.append(object.split(""",""" )[0].lower().strip() )
return vg_classes, vg_attrs
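# Illustrative note (assumed file format): each line of objects.txt /
# attributes.txt is expected to hold comma-separated aliases, e.g. "tree,trees";
# only the first alias is kept, lower-cased and stripped.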
def lowerCamelCase__ ( __lowerCamelCase : Any ):
__UpperCAmelCase : List[str] = OrderedDict()
with open(__lowerCamelCase , """rb""" ) as f:
__UpperCAmelCase : int = pkl.load(__lowerCamelCase )["""model"""]
for k in copy.deepcopy(list(ckp.keys() ) ):
__UpperCAmelCase : List[Any] = ckp.pop(__lowerCamelCase )
if isinstance(__lowerCamelCase , np.ndarray ):
__UpperCAmelCase : Union[str, Any] = torch.tensor(__lowerCamelCase )
else:
assert isinstance(__lowerCamelCase , torch.Tensor ), type(__lowerCamelCase )
__UpperCAmelCase : List[str] = v
return r
class a :
"""simple docstring"""
a : Dict = {}
def __init__( self : Dict , __lowercase : dict , __lowercase : str = "root" , __lowercase : Any=0 ) -> Dict:
__UpperCAmelCase : List[str] = name
__UpperCAmelCase : str = level
__UpperCAmelCase : int = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
__UpperCAmelCase : List[str] = copy.deepcopy(__lowercase )
__UpperCAmelCase : Dict = copy.deepcopy(__lowercase )
if isinstance(__lowercase , __lowercase ):
__UpperCAmelCase : Union[str, Any] = Config(__lowercase , name=__lowercase , level=level + 1 )
__UpperCAmelCase : Union[str, Any] = v
setattr(self , __lowercase , __lowercase )
__UpperCAmelCase : Any = d
def __repr__( self : Optional[Any] ) -> Optional[int]:
return str(list((self._pointer.keys()) ) )
def __setattr__( self : List[str] , __lowercase : List[str] , __lowercase : Tuple ) -> int:
__UpperCAmelCase : int = val
__UpperCAmelCase : List[str] = val
__UpperCAmelCase : Union[str, Any] = key.split(""".""" )
__UpperCAmelCase : List[Any] = len(__lowercase ) - 1
__UpperCAmelCase : List[Any] = self._pointer
if len(__lowercase ) > 1:
for i, l in enumerate(__lowercase ):
if hasattr(self , __lowercase ) and isinstance(getattr(self , __lowercase ) , __lowercase ):
setattr(getattr(self , __lowercase ) , """.""".join(levels[i:] ) , __lowercase )
if l == last_level:
__UpperCAmelCase : Union[str, Any] = val
else:
__UpperCAmelCase : Union[str, Any] = pointer[l]
def UpperCAmelCase ( self : Tuple ) -> Optional[int]:
return self._pointer
def UpperCAmelCase ( self : str , __lowercase : Optional[int] , __lowercase : Any ) -> Optional[int]:
with open(f"""{file_name}""" , """w""" ) as stream:
dump(__lowercase , __lowercase )
def UpperCAmelCase ( self : List[str] , __lowercase : Union[str, Any] , __lowercase : Union[str, Any] ) -> Any:
with open(f"""{file_name}""" , """w""" ) as stream:
json.dump(__lowercase , __lowercase )
@staticmethod
def UpperCAmelCase ( __lowercase : List[Any] ) -> Optional[Any]:
with open(__lowercase ) as stream:
__UpperCAmelCase : Any = load(__lowercase , Loader=__lowercase )
return data
def __str__( self : List[str] ) -> Tuple:
__UpperCAmelCase : Any = """ """
if self._name != "root":
__UpperCAmelCase : Optional[Any] = f"""{t * (self._level-1)}{self._name}:\n"""
else:
__UpperCAmelCase : List[Any] = """"""
__UpperCAmelCase : Optional[Any] = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(__lowercase , __lowercase ):
r += f"""{t * (self._level)}{v}\n"""
self._level += 1
else:
r += f"""{t * (self._level)}{k}: {v} ({type(__lowercase ).__name__})\n"""
__UpperCAmelCase : int = level
return r[:-1]
@classmethod
def UpperCAmelCase ( cls : List[str] , __lowercase : str , **__lowercase : Any ) -> Any:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = cls.get_config_dict(__lowercase , **__lowercase )
return cls(__lowercase )
@classmethod
def UpperCAmelCase ( cls : Dict , __lowercase : str , **__lowercase : Union[str, Any] ) -> Optional[int]:
__UpperCAmelCase : int = kwargs.pop("""cache_dir""" , __lowercase )
__UpperCAmelCase : int = kwargs.pop("""force_download""" , __lowercase )
__UpperCAmelCase : str = kwargs.pop("""resume_download""" , __lowercase )
__UpperCAmelCase : Dict = kwargs.pop("""proxies""" , __lowercase )
__UpperCAmelCase : Union[str, Any] = kwargs.pop("""local_files_only""" , __lowercase )
if os.path.isdir(__lowercase ):
__UpperCAmelCase : List[Any] = os.path.join(__lowercase , __lowercase )
elif os.path.isfile(__lowercase ) or is_remote_url(__lowercase ):
__UpperCAmelCase : Tuple = pretrained_model_name_or_path
else:
__UpperCAmelCase : Optional[int] = hf_bucket_url(__lowercase , filename=__lowercase , use_cdn=__lowercase )
try:
# Load from URL or cache if already cached
__UpperCAmelCase : Optional[int] = cached_path(
__lowercase , cache_dir=__lowercase , force_download=__lowercase , proxies=__lowercase , resume_download=__lowercase , local_files_only=__lowercase , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
__UpperCAmelCase : Optional[int] = Config.load_yaml(__lowercase )
except EnvironmentError:
__UpperCAmelCase : str = """Can't load config for"""
raise EnvironmentError(__lowercase )
if resolved_config_file == config_file:
print("""loading configuration file from path""" )
else:
print("""loading configuration file cache""" )
return Config.load_yaml(__lowercase ), kwargs
def lowerCamelCase__ ( __lowerCamelCase : Dict ):
__UpperCAmelCase : Optional[int] = torch.load("""dump.pt""" , map_location=in_tensor.device )
__UpperCAmelCase : Tuple = in_tensor.numpy()
__UpperCAmelCase : Optional[int] = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
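# Note: on success this helper deliberately raises — the "tensors are all good"
# exception below is a debugging sentinel that halts execution, not an error.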
assert np.allclose(__lowerCamelCase , __lowerCamelCase , rtol=0.0_1 , atol=0.1 ), (
f"""{sum([1 for x in np.isclose(__lowerCamelCase , __lowerCamelCase , rtol=0.0_1 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %"""
" element-wise mismatch"
)
raise Exception("""tensors are all good""" )
# Hugging face functions below
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] ):
__UpperCAmelCase : Tuple = urlparse(__lowerCamelCase )
return parsed.scheme in ("http", "https")
def lowerCamelCase__ ( __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : int=True ):
__UpperCAmelCase : int = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
__UpperCAmelCase : Optional[int] = """/""" not in model_id
if legacy_format:
return f"""{endpoint}/{model_id}-{filename}"""
else:
return f"""{endpoint}/{model_id}/{filename}"""
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple=None , __lowerCamelCase : Optional[Any]=0 , __lowerCamelCase : Optional[int]=None , ):
__UpperCAmelCase : Optional[int] = """python/{}""".format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
ua += "; " + "; ".join("""{}/{}""".format(__lowerCamelCase , __lowerCamelCase ) for k, v in user_agent.items() )
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
ua += "; " + user_agent
__UpperCAmelCase : List[str] = {"""user-agent""": ua}
if resume_size > 0:
__UpperCAmelCase : Union[str, Any] = """bytes=%d-""" % (resume_size,)
__UpperCAmelCase : Union[str, Any] = requests.get(__lowerCamelCase , stream=__lowerCamelCase , proxies=__lowerCamelCase , headers=__lowerCamelCase )
if response.status_code == 416: # Range not satisfiable
return
__UpperCAmelCase : List[str] = response.headers.get("""Content-Length""" )
__UpperCAmelCase : str = resume_size + int(__lowerCamelCase ) if content_length is not None else None
__UpperCAmelCase : List[Any] = tqdm(
unit="""B""" , unit_scale=__lowerCamelCase , total=__lowerCamelCase , initial=__lowerCamelCase , desc="""Downloading""" , )
for chunk in response.iter_content(chunk_size=1024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(__lowerCamelCase ) )
temp_file.write(__lowerCamelCase )
progress.close()
def lowerCamelCase__ ( __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : str=10 , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Dict=None , __lowerCamelCase : List[str]=False , ):
if cache_dir is None:
__UpperCAmelCase : Optional[Any] = TRANSFORMERS_CACHE
if isinstance(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : List[str] = str(__lowerCamelCase )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
__UpperCAmelCase : List[Any] = None
if not local_files_only:
try:
__UpperCAmelCase : Optional[Any] = requests.head(__lowerCamelCase , allow_redirects=__lowerCamelCase , proxies=__lowerCamelCase , timeout=__lowerCamelCase )
if response.status_code == 200:
__UpperCAmelCase : Dict = response.headers.get("""ETag""" )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
__UpperCAmelCase : List[str] = url_to_filename(__lowerCamelCase , __lowerCamelCase )
# get cache path to put the file
__UpperCAmelCase : Optional[int] = os.path.join(__lowerCamelCase , __lowerCamelCase )
# etag is None => we don't have a connection, the url doesn't exist, or it is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(__lowerCamelCase ):
return cache_path
else:
__UpperCAmelCase : List[Any] = [
file
for file in fnmatch.filter(os.listdir(__lowerCamelCase ) , filename + """.*""" )
if not file.endswith(""".json""" ) and not file.endswith(""".lock""" )
]
if len(__lowerCamelCase ) > 0:
return os.path.join(__lowerCamelCase , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
"""Cannot find the requested files in the cached path and outgoing traffic has been"""
""" disabled. To enable model look-ups and downloads online, set 'local_files_only'"""
""" to False.""" )
return None
# From now on, etag is not None.
if os.path.exists(__lowerCamelCase ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
__UpperCAmelCase : str = cache_path + """.lock"""
with FileLock(__lowerCamelCase ):
# If the download just completed while the lock was activated.
if os.path.exists(__lowerCamelCase ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
__UpperCAmelCase : int = cache_path + """.incomplete"""
@contextmanager
def _resumable_file_manager():
with open(__lowerCamelCase , """a+b""" ) as f:
yield f
__UpperCAmelCase : str = _resumable_file_manager
if os.path.exists(__lowerCamelCase ):
__UpperCAmelCase : List[Any] = os.stat(__lowerCamelCase ).st_size
else:
__UpperCAmelCase : List[Any] = 0
else:
__UpperCAmelCase : str = partial(tempfile.NamedTemporaryFile , dir=__lowerCamelCase , delete=__lowerCamelCase )
__UpperCAmelCase : Optional[int] = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
"""%s not found in cache or force_download set to True, downloading to %s""" % (__lowerCamelCase , temp_file.name) )
http_get(
__lowerCamelCase , __lowerCamelCase , proxies=__lowerCamelCase , resume_size=__lowerCamelCase , user_agent=__lowerCamelCase , )
os.replace(temp_file.name , __lowerCamelCase )
__UpperCAmelCase : Any = {"""url""": url, """etag""": etag}
__UpperCAmelCase : Union[str, Any] = cache_path + """.json"""
with open(__lowerCamelCase , """w""" ) as meta_file:
json.dump(__lowerCamelCase , __lowerCamelCase )
return cache_path
def lowerCamelCase__ ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any]=None ):
__UpperCAmelCase : Tuple = url.encode("""utf-8""" )
__UpperCAmelCase : Optional[Any] = shaaaa(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = url_hash.hexdigest()
if etag:
__UpperCAmelCase : int = etag.encode("""utf-8""" )
__UpperCAmelCase : List[str] = shaaaa(__lowerCamelCase )
filename += "." + etag_hash.hexdigest()
if url.endswith(""".h5""" ):
filename += ".h5"
return filename
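# Sketch of the cache-key scheme above: filename = sha256(url) and, when an ETag
# is available, sha256(url) + "." + sha256(etag); ".h5" URLs keep their suffix
# (presumably so TF weight files stay recognizable).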
def lowerCamelCase__ ( __lowerCamelCase : Any , __lowerCamelCase : int=None , __lowerCamelCase : int=False , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Tuple=False , __lowerCamelCase : str=None , __lowerCamelCase : List[Any]=False , __lowerCamelCase : Tuple=False , __lowerCamelCase : str=False , ):
if cache_dir is None:
__UpperCAmelCase : List[str] = TRANSFORMERS_CACHE
if isinstance(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : Any = str(__lowerCamelCase )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : Tuple = str(__lowerCamelCase )
if is_remote_url(__lowerCamelCase ):
# URL, so get it from the cache (downloading if necessary)
__UpperCAmelCase : Tuple = get_from_cache(
__lowerCamelCase , cache_dir=__lowerCamelCase , force_download=__lowerCamelCase , proxies=__lowerCamelCase , resume_download=__lowerCamelCase , user_agent=__lowerCamelCase , local_files_only=__lowerCamelCase , )
elif os.path.exists(__lowerCamelCase ):
# File, and it exists.
__UpperCAmelCase : Tuple = url_or_filename
elif urlparse(__lowerCamelCase ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("""file {} not found""".format(__lowerCamelCase ) )
else:
# Something unknown
raise ValueError("""unable to parse {} as a URL or as a local path""".format(__lowerCamelCase ) )
if extract_compressed_file:
if not is_zipfile(__lowerCamelCase ) and not tarfile.is_tarfile(__lowerCamelCase ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
__UpperCAmelCase , __UpperCAmelCase : int = os.path.split(__lowerCamelCase )
__UpperCAmelCase : Any = output_file.replace(""".""" , """-""" ) + """-extracted"""
__UpperCAmelCase : List[str] = os.path.join(__lowerCamelCase , __lowerCamelCase )
if os.path.isdir(__lowerCamelCase ) and os.listdir(__lowerCamelCase ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
__UpperCAmelCase : str = output_path + """.lock"""
with FileLock(__lowerCamelCase ):
shutil.rmtree(__lowerCamelCase , ignore_errors=__lowerCamelCase )
os.makedirs(__lowerCamelCase )
if is_zipfile(__lowerCamelCase ):
with ZipFile(__lowerCamelCase , """r""" ) as zip_file:
zip_file.extractall(__lowerCamelCase )
zip_file.close()
elif tarfile.is_tarfile(__lowerCamelCase ):
__UpperCAmelCase : Any = tarfile.open(__lowerCamelCase )
tar_file.extractall(__lowerCamelCase )
tar_file.close()
else:
raise EnvironmentError("""Archive format of {} could not be identified""".format(__lowerCamelCase ) )
return output_path_extracted
return output_path
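# Hedged usage sketch (the de-obfuscated upstream name `cached_path` is assumed
# for the helper above):
#
#     local_path = cached_path("https://cdn.huggingface.co/.../config.yaml")
#     # -> downloads (or reuses the cache), optionally extracting zip/tar
#     #    archives when extract_compressed_file=True, and returns a local path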
def lowerCamelCase__ ( __lowerCamelCase : List[Any] , __lowerCamelCase : int="," ):
assert isinstance(__lowerCamelCase , __lowerCamelCase )
if os.path.isfile(__lowerCamelCase ):
with open(__lowerCamelCase ) as f:
__UpperCAmelCase : List[Any] = eval(f.read() )
else:
__UpperCAmelCase : List[str] = requests.get(__lowerCamelCase )
try:
__UpperCAmelCase : int = req.json()
except Exception:
__UpperCAmelCase : List[Any] = req.content.decode()
assert data is not None, "could not connect"
try:
__UpperCAmelCase : str = eval(__lowerCamelCase )
except Exception:
__UpperCAmelCase : List[Any] = data.split("""\n""" )
req.close()
return data
def lowerCamelCase__ ( __lowerCamelCase : Any ):
__UpperCAmelCase : Optional[int] = requests.get(__lowerCamelCase )
__UpperCAmelCase : List[Any] = np.array(Image.open(BytesIO(response.content ) ) )
return img
def lowerCamelCase__ ( __lowerCamelCase : str ):
__UpperCAmelCase : int = url.split("""/""" )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(__lowerCamelCase )
with open(__lowerCamelCase , """rb""" ) as stream:
__UpperCAmelCase : List[str] = pkl.load(__lowerCamelCase )
__UpperCAmelCase : Dict = weights.pop("""model""" )
__UpperCAmelCase : Union[str, Any] = {}
for k, v in model.items():
__UpperCAmelCase : int = torch.from_numpy(__lowerCamelCase )
if "running_var" in k:
__UpperCAmelCase : Optional[int] = torch.tensor([0] )
__UpperCAmelCase : Tuple = k.replace("""running_var""" , """num_batches_tracked""" )
__UpperCAmelCase : Any = zero
return new
def lowerCamelCase__ ( ):
print(f"""{os.path.abspath(os.path.join(__lowerCamelCase , os.pardir ) )}/demo.ipynb""" )
def lowerCamelCase__ ( __lowerCamelCase : Any , __lowerCamelCase : List[Any]="RGB" ):
assert isinstance(__lowerCamelCase , __lowerCamelCase )
if os.path.isfile(__lowerCamelCase ):
__UpperCAmelCase : List[str] = cva.imread(__lowerCamelCase )
else:
__UpperCAmelCase : int = get_image_from_url(__lowerCamelCase )
assert img is not None, f"""could not connect to: {im}"""
__UpperCAmelCase : Any = cva.cvtColor(__lowerCamelCase , cva.COLOR_BGR2RGB )
if input_format == "RGB":
__UpperCAmelCase : Optional[int] = img[:, :, ::-1]
return img
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] , __lowerCamelCase : int=1 ):
return (images[i : i + batch] for i in range(0 , len(__lowerCamelCase ) , __lowerCamelCase ))
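# Sketch: the generator above yields successive batches, e.g. with batch=2 over
# [a, b, c, d, e] it yields [a, b], [c, d], [e].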
| 63 | 1 |
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
a : Optional[Any] = LEDTokenizer
a : Optional[int] = LEDTokenizerFast
a : List[str] = True
def UpperCAmelCase ( self : Tuple ) -> str:
super().setUp()
__UpperCAmelCase : Optional[Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
__UpperCAmelCase : Optional[Any] = dict(zip(__lowercase , range(len(__lowercase ) ) ) )
__UpperCAmelCase : List[Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
__UpperCAmelCase : Any = {"""unk_token""": """<unk>"""}
__UpperCAmelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__UpperCAmelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__lowercase ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__lowercase ) )
def UpperCAmelCase ( self : Any , **__lowercase : Dict ) -> int:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowercase )
def UpperCAmelCase ( self : List[str] , **__lowercase : Any ) -> Union[str, Any]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__lowercase )
def UpperCAmelCase ( self : List[Any] , __lowercase : List[Any] ) -> Optional[int]:
return "lower newer", "lower newer"
@cached_property
def UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" )
@cached_property
def UpperCAmelCase ( self : List[Any] ) -> Dict:
return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" )
@require_torch
def UpperCAmelCase ( self : List[str] ) -> Tuple:
__UpperCAmelCase : Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
__UpperCAmelCase : Union[str, Any] = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCAmelCase : Tuple = tokenizer(__lowercase , max_length=len(__lowercase ) , padding=__lowercase , return_tensors="""pt""" )
self.assertIsInstance(__lowercase , __lowercase )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
__UpperCAmelCase : Union[str, Any] = batch.input_ids.tolist()[0]
self.assertListEqual(__lowercase , __lowercase )
@require_torch
def UpperCAmelCase ( self : Dict ) -> Optional[Any]:
__UpperCAmelCase : Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCAmelCase : List[Any] = tokenizer(__lowercase , padding=__lowercase , return_tensors="""pt""" )
self.assertIn("""input_ids""" , __lowercase )
self.assertIn("""attention_mask""" , __lowercase )
self.assertNotIn("""labels""" , __lowercase )
self.assertNotIn("""decoder_attention_mask""" , __lowercase )
@require_torch
def UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
__UpperCAmelCase : List[Any] = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCAmelCase : Optional[int] = tokenizer(text_target=__lowercase , max_length=32 , padding="""max_length""" , return_tensors="""pt""" )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
@require_torch
def UpperCAmelCase ( self : str ) -> int:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCAmelCase : Any = tokenizer(
["""I am a small frog""" * 1024, """I am a small frog"""] , padding=__lowercase , truncation=__lowercase , return_tensors="""pt""" )
self.assertIsInstance(__lowercase , __lowercase )
self.assertEqual(batch.input_ids.shape , (2, 5122) )
@require_torch
def UpperCAmelCase ( self : Dict ) -> Any:
__UpperCAmelCase : Dict = ["""A long paragraph for summarization."""]
__UpperCAmelCase : str = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCAmelCase : List[Any] = tokenizer(__lowercase , return_tensors="""pt""" )
__UpperCAmelCase : Union[str, Any] = tokenizer(text_target=__lowercase , return_tensors="""pt""" )
__UpperCAmelCase : Optional[Any] = inputs["""input_ids"""]
__UpperCAmelCase : str = targets["""input_ids"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def UpperCAmelCase ( self : int ) -> List[str]:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCAmelCase : Optional[int] = ["""Summary of the text.""", """Another summary."""]
__UpperCAmelCase : int = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
__UpperCAmelCase : str = tokenizer(__lowercase , padding=__lowercase )
__UpperCAmelCase : List[str] = [[0] * len(__lowercase ) for x in encoded_output["""input_ids"""]]
__UpperCAmelCase : Optional[Any] = tokenizer.pad(__lowercase )
self.assertSequenceEqual(outputs["""global_attention_mask"""] , __lowercase )
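# Note: per the expectation above, LED's `pad` extends `global_attention_mask`
# alongside `input_ids`, filling the padded positions with -1.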
def UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
pass
def UpperCAmelCase ( self : Tuple ) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__UpperCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(__lowercase , **__lowercase )
__UpperCAmelCase : Optional[Any] = self.tokenizer_class.from_pretrained(__lowercase , **__lowercase )
__UpperCAmelCase : Tuple = """A, <mask> AllenNLP sentence."""
__UpperCAmelCase : Union[str, Any] = tokenizer_r.encode_plus(__lowercase , add_special_tokens=__lowercase , return_token_type_ids=__lowercase )
__UpperCAmelCase : Any = tokenizer_p.encode_plus(__lowercase , add_special_tokens=__lowercase , return_token_type_ids=__lowercase )
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
__UpperCAmelCase : Optional[int] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
__UpperCAmelCase : Dict = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
__lowercase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
__lowercase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
| 63 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Tuple , __lowercase : Union[str, Any] , __lowercase : Any=13 , __lowercase : Optional[int]=7 , __lowercase : str=True , __lowercase : Optional[Any]=True , __lowercase : int=True , __lowercase : int=True , __lowercase : List[str]=99 , __lowercase : int=32 , __lowercase : int=5 , __lowercase : Tuple=4 , __lowercase : str=37 , __lowercase : Optional[int]="gelu" , __lowercase : Tuple=0.1 , __lowercase : str=0.1 , __lowercase : Dict=512 , __lowercase : List[Any]=16 , __lowercase : Dict=2 , __lowercase : Union[str, Any]=0.02 , __lowercase : Dict=4 , ) -> int:
__UpperCAmelCase : Dict = parent
__UpperCAmelCase : Any = batch_size
__UpperCAmelCase : str = seq_length
__UpperCAmelCase : Tuple = is_training
__UpperCAmelCase : List[Any] = use_attention_mask
__UpperCAmelCase : Dict = use_token_type_ids
__UpperCAmelCase : Optional[int] = use_labels
__UpperCAmelCase : Tuple = vocab_size
__UpperCAmelCase : List[Any] = hidden_size
__UpperCAmelCase : Optional[Any] = num_hidden_layers
__UpperCAmelCase : Any = num_attention_heads
__UpperCAmelCase : List[Any] = intermediate_size
__UpperCAmelCase : Any = hidden_act
__UpperCAmelCase : Any = hidden_dropout_prob
__UpperCAmelCase : List[Any] = attention_probs_dropout_prob
__UpperCAmelCase : Dict = max_position_embeddings
__UpperCAmelCase : List[str] = type_vocab_size
__UpperCAmelCase : str = type_sequence_label_size
__UpperCAmelCase : List[str] = initializer_range
__UpperCAmelCase : Tuple = num_choices
def UpperCAmelCase ( self : Dict ) -> Tuple:
__UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : Optional[Any] = None
if self.use_attention_mask:
__UpperCAmelCase : Any = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase : List[Any] = None
if self.use_token_type_ids:
__UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCAmelCase : Optional[Any] = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowercase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase ( self : Tuple ) -> List[Any]:
__UpperCAmelCase : int = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = config_and_inputs
__UpperCAmelCase : List[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def UpperCAmelCase ( self : Any ) -> List[str]:
__UpperCAmelCase : List[Any] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[int] = config_and_inputs
__UpperCAmelCase : int = True
__UpperCAmelCase : str = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
a : Optional[Any] = True
a : List[Any] = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self) -> None:
        self.model_tester = FlaxRobertaModelTester(self)
@slow
def UpperCAmelCase ( self : str ) -> List[Any]:
for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 63 | 1 |
g = 9.80665  # standard gravity, in m/s^2


def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
    """Buoyant force = fluid density * gravity * displaced volume."""
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density")
    if volume < 0:
        raise ValueError("Impossible Object volume")
    if gravity <= 0:
        raise ValueError("Impossible Gravity")
    return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
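
# A minimal usage sketch (added): the fresh-water density of ~1000 kg/m^3 is a
# standard reference value, not something defined in this file.
def _demo_buoyancy() -> None:
    force = archimedes_principle(fluid_density=1000.0, volume=0.5)
    print(f"Buoyant force on 0.5 m^3 of displaced fresh water: {force:.2f} N")  # roughly 4903 N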
| 63 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    """simple docstring"""

    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, lr_str = rule_str.split(":")
        steps = int(value_str)
        lr_multiple = float(lr_str)
        rules_dict[steps] = lr_multiple
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )
    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
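
# A short usage sketch (added): wiring the piecewise-constant schedule to a
# throwaway optimizer. In the `step_rules` string, each "threshold:multiplier"
# pair applies below its threshold, and the trailing bare number is the
# default multiplier afterwards. The tiny parameter list is a stand-in, not a
# real model.
def _demo_piecewise_schedule() -> None:
    import torch

    params = [torch.nn.Parameter(torch.zeros(1))]  # stand-in for real model parameters
    optimizer = torch.optim.AdamW(params, lr=1e-3)
    # LR multiplier is 0.1 before step 500, 0.5 before step 1000, 0.05 afterwards.
    scheduler = get_scheduler("piecewise_constant", optimizer, step_rules="500:0.1,1000:0.5,0.05")
    for _ in range(3):
        optimizer.step()
        scheduler.step()
    print(scheduler.get_last_lr())  # [0.0001] while still inside the first rule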
| 63 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Any = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 |
from math import pi, sqrt


def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)  # gamma(1/2) = sqrt(pi) is the base case for half-integers
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
| 63 | 1 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
a : Dict = KandinskyVaaPriorPipeline
a : List[Any] = ['prompt']
a : int = ['prompt', 'negative_prompt']
a : List[str] = [
'num_images_per_prompt',
'generator',
'num_inference_steps',
'latents',
'negative_prompt',
'guidance_scale',
'output_type',
'return_dict',
]
a : Any = False
@property
def UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
return 32
@property
def UpperCAmelCase ( self : Tuple ) -> Any:
return 32
@property
def UpperCAmelCase ( self : int ) -> List[Any]:
return self.time_input_dim
@property
def UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
return self.time_input_dim * 4
@property
def UpperCAmelCase ( self : List[Any] ) -> int:
return 100
@property
def UpperCAmelCase ( self : Any ) -> str:
__UpperCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def UpperCAmelCase ( self : Union[str, Any] ) -> int:
torch.manual_seed(0 )
__UpperCAmelCase : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(__lowercase )
@property
def UpperCAmelCase ( self : List[str] ) -> Any:
torch.manual_seed(0 )
__UpperCAmelCase : List[Any] = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 12,
"""embedding_dim""": self.text_embedder_hidden_size,
"""num_layers""": 1,
}
__UpperCAmelCase : Any = PriorTransformer(**__lowercase )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
__UpperCAmelCase : List[Any] = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def UpperCAmelCase ( self : Dict ) -> Dict:
torch.manual_seed(0 )
__UpperCAmelCase : List[str] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
__UpperCAmelCase : Any = CLIPVisionModelWithProjection(__lowercase )
return model
@property
def UpperCAmelCase ( self : Any ) -> List[Any]:
__UpperCAmelCase : int = CLIPImageProcessor(
crop_size=224 , do_center_crop=__lowercase , do_normalize=__lowercase , do_resize=__lowercase , image_mean=[0.48_145_466, 0.4_578_275, 0.40_821_073] , image_std=[0.26_862_954, 0.26_130_258, 0.27_577_711] , resample=3 , size=224 , )
return image_processor
def UpperCAmelCase ( self : Optional[Any] ) -> Dict:
__UpperCAmelCase : str = self.dummy_prior
__UpperCAmelCase : List[str] = self.dummy_image_encoder
__UpperCAmelCase : List[str] = self.dummy_text_encoder
__UpperCAmelCase : Optional[int] = self.dummy_tokenizer
__UpperCAmelCase : Any = self.dummy_image_processor
__UpperCAmelCase : Tuple = UnCLIPScheduler(
variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=__lowercase , clip_sample_range=10.0 , )
__UpperCAmelCase : Optional[Any] = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""scheduler""": scheduler,
"""image_processor""": image_processor,
}
return components
def UpperCAmelCase ( self : int , __lowercase : Dict , __lowercase : Tuple=0 ) -> int:
if str(__lowercase ).startswith("""mps""" ):
__UpperCAmelCase : Optional[int] = torch.manual_seed(__lowercase )
else:
__UpperCAmelCase : str = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
__UpperCAmelCase : str = {
"""prompt""": """horse""",
"""generator""": generator,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
__UpperCAmelCase : Dict = """cpu"""
__UpperCAmelCase : Any = self.get_dummy_components()
__UpperCAmelCase : Optional[Any] = self.pipeline_class(**__lowercase )
__UpperCAmelCase : int = pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
__UpperCAmelCase : int = pipe(**self.get_dummy_inputs(__lowercase ) )
__UpperCAmelCase : Any = output.image_embeds
__UpperCAmelCase : str = pipe(
**self.get_dummy_inputs(__lowercase ) , return_dict=__lowercase , )[0]
__UpperCAmelCase : int = image[0, -10:]
__UpperCAmelCase : List[Any] = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
__UpperCAmelCase : str = np.array(
[-0.0_532, 1.7_120, 0.3_656, -1.0_852, -0.8_946, -1.1_756, 0.4_348, 0.2_482, 0.5_146, -0.1_156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def UpperCAmelCase ( self : str ) -> List[Any]:
__UpperCAmelCase : Union[str, Any] = torch_device == """cpu"""
__UpperCAmelCase : List[Any] = True
__UpperCAmelCase : List[str] = False
self._test_inference_batch_single_identical(
test_max_difference=__lowercase , relax_max_difference=__lowercase , test_mean_pixel_difference=__lowercase , )
@skip_mps
def UpperCAmelCase ( self : Dict ) -> int:
__UpperCAmelCase : Optional[int] = torch_device == """cpu"""
__UpperCAmelCase : Union[str, Any] = False
self._test_attention_slicing_forward_pass(
test_max_difference=__lowercase , test_mean_pixel_difference=__lowercase , )
| 63 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None
def make_tree() -> Node:
    # The original wiring of these five nodes was lost; this reconstruction
    # follows the shape the upstream snippet documents: 1 at the root, 2 and 3
    # as its children, and 4 and 5 as children of 2.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree
def preorder(root: Node | None) -> list[int]:
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def postorder(root: Node | None) -> list[int]:
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def inorder(root: Node | None) -> list[int]:
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def height(root: Node | None) -> int:
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def level_order(root: Node | None) -> list[Any]:
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output
def get_nodes_from_left_to_right(root: Node | None, level: int) -> list[Any]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output
def get_nodes_from_right_to_left(root: Node | None, level: int) -> list[Any]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output
def zigzag(root: Node | None) -> list[Any]:
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output
def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")
    print(f"Height of Tree: {height(root)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(root))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
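
# A runnable check (added) pinning down the expected traversal orders for the
# five-node tree built by make_tree() above.
def _check_traversals() -> None:
    root = make_tree()
    assert preorder(root) == [1, 2, 4, 5, 3]
    assert inorder(root) == [4, 2, 5, 1, 3]
    assert postorder(root) == [4, 5, 2, 3, 1]
    assert level_order(root) == [1, 2, 3, 4, 5]
    assert zigzag(root) == [[1], [3, 2], [4, 5]]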
| 63 | 1 |
from __future__ import annotations
from random import random
class Node:
    """simple docstring"""

    def __init__(self, value: int | None = None) -> None:
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None
def __repr__( self : Optional[int] ) -> str:
from pprint import pformat
if self.left is None and self.right is None:
return f"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{f"""{self.value}: {self.prior:.5}""": (self.left, self.right)} , indent=1 )
    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right
def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right
def merge(left: Node | None, right: Node | None) -> Node | None:
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right
def insert(root: Node | None, value: int) -> Node | None:
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)
def erase(root: Node | None, value: int) -> Node | None:
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)
def inorder(root: Node | None) -> None:
if not root: # None
return
else:
inorder(root.left )
print(root.value , end=""",""" )
inorder(root.right )
def interact_treap(root: Node | None, args: str) -> Node | None:
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root
def main() -> None:
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("good bye!")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
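
# A small usage sketch (added): building a treap from a handful of keys and
# printing the in-order traversal, which comes out sorted regardless of the
# random priorities.
def _demo_treap() -> None:
    root = None
    for key in [5, 3, 8, 1, 4]:
        root = insert(root, key)
    inorder(root)  # prints 1,3,4,5,8,
    print()
    root = erase(root, 3)
    inorder(root)  # prints 1,4,5,8,
    print()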
| 63 |
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
a : Optional[int] = GPTSanJapaneseTokenizer
a : Optional[Any] = False
a : List[str] = {'do_clean_text': False, 'add_prefix_space': False}
def UpperCAmelCase ( self : Tuple ) -> Any:
super().setUp()
# fmt: off
__UpperCAmelCase : Tuple = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
# fmt: on
__UpperCAmelCase : Dict = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
__UpperCAmelCase : Dict = {"""unk_token""": """<unk>"""}
__UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.emoji_file , """w""" ) as emoji_writer:
emoji_writer.write(json.dumps(__lowercase ) )
def UpperCAmelCase ( self : Tuple , **__lowercase : int ) -> Any:
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__lowercase )
def UpperCAmelCase ( self : str , __lowercase : Union[str, Any] ) -> Any:
__UpperCAmelCase : Any = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
__UpperCAmelCase : int = """こんにちは、世界。 \nこんばんは、世界。😀"""
return input_text, output_text
def UpperCAmelCase ( self : List[Any] , __lowercase : Optional[int] ) -> List[Any]:
__UpperCAmelCase , __UpperCAmelCase : int = self.get_input_output_texts(__lowercase )
__UpperCAmelCase : Tuple = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
__UpperCAmelCase : Dict = tokenizer.decode(__lowercase , clean_up_tokenization_spaces=__lowercase )
return text, ids
def UpperCAmelCase ( self : int ) -> Optional[Any]:
pass # TODO add if relevant
def UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
pass # TODO add if relevant
def UpperCAmelCase ( self : Dict ) -> Tuple:
pass # TODO add if relevant
def UpperCAmelCase ( self : str ) -> Tuple:
__UpperCAmelCase : List[str] = self.get_tokenizer()
# Testing tokenization
__UpperCAmelCase : int = """こんにちは、世界。 こんばんは、㔺界。"""
__UpperCAmelCase : Dict = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
__UpperCAmelCase : Optional[Any] = tokenizer.tokenize(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
# Testing conversion to ids without special tokens
__UpperCAmelCase : List[str] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
__UpperCAmelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
# Testing conversion to ids with special tokens
__UpperCAmelCase : List[Any] = tokens + [tokenizer.unk_token]
__UpperCAmelCase : str = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
__UpperCAmelCase : Any = tokenizer.convert_tokens_to_ids(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
def UpperCAmelCase ( self : Tuple ) -> Dict:
__UpperCAmelCase : int = self.get_tokenizer()
# Testing tokenization
__UpperCAmelCase : Tuple = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
__UpperCAmelCase : int = """こんにちは、、、、世界。こんばんは、、、、世界。"""
__UpperCAmelCase : Tuple = tokenizer.encode(__lowercase )
__UpperCAmelCase : int = tokenizer.decode(__lowercase )
self.assertEqual(__lowercase , __lowercase )
@slow
def UpperCAmelCase ( self : int ) -> Optional[int]:
__UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
__UpperCAmelCase : List[Any] = """こんにちは、世界。"""
__UpperCAmelCase : Optional[int] = """こんばんは、㔺界。😀"""
__UpperCAmelCase : List[Any] = """こんにちは、世界。こんばんは、世界。😀"""
__UpperCAmelCase : List[str] = tokenizer.encode(prefix_text + input_text )
__UpperCAmelCase : List[Any] = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
__UpperCAmelCase : Any = tokenizer.encode(__lowercase , prefix_text=__lowercase )
__UpperCAmelCase : Optional[int] = tokenizer.decode(__lowercase )
__UpperCAmelCase : Any = tokenizer.decode(__lowercase )
__UpperCAmelCase : Optional[Any] = tokenizer.decode(__lowercase )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
@slow
def UpperCAmelCase ( self : Any ) -> str:
__UpperCAmelCase : int = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
__UpperCAmelCase : int = """こんにちは、世界。"""
__UpperCAmelCase : List[Any] = """こんばんは、㔺界。😀"""
__UpperCAmelCase : Union[str, Any] = len(tokenizer.encode(__lowercase ) ) - 2
__UpperCAmelCase : int = len(tokenizer.encode(__lowercase ) ) - 2
__UpperCAmelCase : List[Any] = [1] + [0] * (len_prefix + len_text + 1)
__UpperCAmelCase : Union[str, Any] = [1] * (len_prefix + len_text + 1) + [0]
__UpperCAmelCase : List[Any] = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
__UpperCAmelCase : Union[str, Any] = tokenizer(prefix_text + input_text ).token_type_ids
__UpperCAmelCase : Optional[Any] = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
__UpperCAmelCase : Tuple = tokenizer(__lowercase , prefix_text=__lowercase ).token_type_ids
self.assertListEqual(__lowercase , __lowercase )
self.assertListEqual(__lowercase , __lowercase )
self.assertListEqual(__lowercase , __lowercase )
@slow
def UpperCAmelCase ( self : List[str] ) -> int:
__UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
__UpperCAmelCase : Optional[int] = tokenizer.encode("""あンいワ""" )
__UpperCAmelCase : Tuple = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
__UpperCAmelCase : Optional[int] = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
self.assertEqual(tokenizer.decode(__lowercase ) , tokenizer.decode(__lowercase ) )
self.assertEqual(tokenizer.decode(__lowercase ) , tokenizer.decode(__lowercase ) )
self.assertNotEqual(__lowercase , __lowercase )
self.assertNotEqual(__lowercase , __lowercase )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def UpperCAmelCase ( self : List[Any] ) -> List[str]:
__UpperCAmelCase : Any = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
__UpperCAmelCase : List[Any] = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
__UpperCAmelCase : int = tokenizer(__lowercase , padding=__lowercase )
__UpperCAmelCase : Optional[Any] = tokenizer.batch_encode_plus(__lowercase , padding=__lowercase )
# fmt: off
__UpperCAmelCase : Optional[int] = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
__UpperCAmelCase : Tuple = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
__UpperCAmelCase : Union[str, Any] = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , __lowercase )
self.assertListEqual(x_token.token_type_ids , __lowercase )
self.assertListEqual(x_token.attention_mask , __lowercase )
self.assertListEqual(x_token_a.input_ids , __lowercase )
self.assertListEqual(x_token_a.token_type_ids , __lowercase )
self.assertListEqual(x_token_a.attention_mask , __lowercase )
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def UpperCAmelCase ( self : Any ) -> int:
# tokenizer has no padding token
pass
| 63 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self : str ) -> Union[str, Any]:
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))
def UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu_10 = get_activation("gelu_10")
        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu_10(x)
        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)
        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))
def UpperCAmelCase ( self : List[str] ) -> Optional[int]:
get_activation("""gelu""" )
get_activation("""gelu_10""" )
get_activation("""gelu_fast""" )
get_activation("""gelu_new""" )
get_activation("""gelu_python""" )
get_activation("""gelu_pytorch_tanh""" )
get_activation("""linear""" )
get_activation("""mish""" )
get_activation("""quick_gelu""" )
get_activation("""relu""" )
get_activation("""sigmoid""" )
get_activation("""silu""" )
get_activation("""swish""" )
get_activation("""tanh""" )
with self.assertRaises(__lowercase ):
get_activation("""bogus""" )
with self.assertRaises(__lowercase ):
get_activation(__lowercase )
def UpperCAmelCase ( self : str ) -> int:
        acta = get_activation("gelu")
        acta.a = 1
        actb = get_activation("gelu")
        self.assertEqual(acta.a, 1)
        with self.assertRaises(AttributeError):
            _ = actb.a
| 63 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
a : Dict = logging.get_logger(__name__)
@dataclass
class a ( lowercase__ ):
"""simple docstring"""
a : Dict = [
'no_inference',
'no_cuda',
'no_tpu',
'no_speed',
'no_memory',
'no_env_print',
'no_multi_process',
]
def __init__( self : List[Any] , **__lowercase : Dict ) -> Tuple:
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
__UpperCAmelCase : List[Any] = deprecated_arg[3:]
setattr(self , __lowercase , not kwargs.pop(__lowercase ) )
logger.warning(
f"""{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"""
f""" {positive_arg}={kwargs[positive_arg]}""" )
__UpperCAmelCase : str = kwargs.pop("""torchscript""" , self.torchscript )
__UpperCAmelCase : Union[str, Any] = kwargs.pop("""torch_xla_tpu_print_metrics""" , self.torch_xla_tpu_print_metrics )
__UpperCAmelCase : Optional[Any] = kwargs.pop("""fp16_opt_level""" , self.fpaa_opt_level )
super().__init__(**__lowercase )
a : bool = field(default=lowercase__ , metadata={'help': 'Trace the models using torchscript'} )
a : bool = field(default=lowercase__ , metadata={'help': 'Print Xla/PyTorch tpu metrics'} )
a : str = field(
default='O1' , metadata={
'help': (
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '
'See details at https://nvidia.github.io/apex/amp.html'
)
} , )
@cached_property
def UpperCAmelCase ( self : Any ) -> Tuple["torch.device", int]:
requires_backends(self , ["""torch"""] )
logger.info("""PyTorch: setting up devices""" )
if not self.cuda:
__UpperCAmelCase : str = torch.device("""cpu""" )
__UpperCAmelCase : int = 0
elif is_torch_tpu_available():
__UpperCAmelCase : Tuple = xm.xla_device()
__UpperCAmelCase : int = 0
else:
__UpperCAmelCase : Dict = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
__UpperCAmelCase : Optional[int] = torch.cuda.device_count()
return device, n_gpu
@property
def UpperCAmelCase ( self : Optional[Any] ) -> str:
return is_torch_tpu_available() and self.tpu
@property
def UpperCAmelCase ( self : List[str] ) -> int:
requires_backends(self , ["""torch"""] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def UpperCAmelCase ( self : int ) -> "torch.device":
requires_backends(self , ["""torch"""] )
return self._setup_devices[0]
@property
def UpperCAmelCase ( self : int ) -> List[Any]:
requires_backends(self , ["""torch"""] )
return self._setup_devices[1]
@property
def UpperCAmelCase ( self : Tuple ) -> List[str]:
return self.n_gpu > 0
| 63 | 1 |
a : str = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
a : Any = [{"type": "code", "content": INSTALL_CONTENT}]
a : str = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 63 |
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset: Dataset, expected_features: dict):
    assert isinstance(dataset, Dataset)
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict ):
__UpperCAmelCase : Union[str, Any] = tmp_path / """cache"""
__UpperCAmelCase : Optional[int] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCAmelCase : str = SqlDatasetReader(
"""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=__lowerCamelCase , keep_in_memory=__lowerCamelCase ).read()
_check_sql_dataset(__lowerCamelCase , __lowerCamelCase )
@require_sqlalchemy
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def lowerCamelCase__ ( __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : Any ):
__UpperCAmelCase : Union[str, Any] = tmp_path / """cache"""
__UpperCAmelCase : List[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
__UpperCAmelCase : Any = features.copy() if features else default_expected_features
__UpperCAmelCase : Union[str, Any] = (
Features({feature: Value(__lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase : List[str] = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , features=__lowerCamelCase , cache_dir=__lowerCamelCase ).read()
_check_sql_dataset(__lowerCamelCase , __lowerCamelCase )
def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row
@require_sqlalchemy
def lowerCamelCase__ ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : int ):
__UpperCAmelCase : Optional[int] = tmp_path / """cache"""
__UpperCAmelCase : str = os.path.join(__lowerCamelCase , """tmp.sql""" )
__UpperCAmelCase : List[str] = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=__lowerCamelCase ).read()
SqlDatasetWriter(__lowerCamelCase , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=1 ).write()
__UpperCAmelCase : Optional[int] = iter_sql_file(__lowerCamelCase )
__UpperCAmelCase : Dict = iter_sql_file(__lowerCamelCase )
for rowa, rowa in zip(__lowerCamelCase , __lowerCamelCase ):
assert rowa == rowa
@require_sqlalchemy
def lowerCamelCase__ ( __lowerCamelCase : Any , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] ):
__UpperCAmelCase : int = tmp_path / """cache"""
__UpperCAmelCase : int = os.path.join(__lowerCamelCase , """tmp.sql""" )
__UpperCAmelCase : Any = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=__lowerCamelCase ).read()
SqlDatasetWriter(__lowerCamelCase , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=2 ).write()
__UpperCAmelCase : Union[str, Any] = iter_sql_file(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = iter_sql_file(__lowerCamelCase )
for rowa, rowa in zip(__lowerCamelCase , __lowerCamelCase ):
assert rowa == rowa
@require_sqlalchemy
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] ):
__UpperCAmelCase : Union[str, Any] = tmp_path / """cache"""
__UpperCAmelCase : Optional[int] = os.path.join(__lowerCamelCase , """tmp.sql""" )
__UpperCAmelCase : Optional[int] = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=__lowerCamelCase ).read()
with pytest.raises(__lowerCamelCase ):
SqlDatasetWriter(__lowerCamelCase , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=0 ).write()
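
# A hedged sketch (added) of what the `sqlite_path` fixture these tests assume
# might look like: a four-row "dataset" table with the three expected columns.
# The real fixture lives elsewhere in the test suite; this is only illustrative.
@pytest.fixture
def _example_sqlite_path(tmp_path):
    path = str(tmp_path / "example.sqlite")
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute("CREATE TABLE dataset (col_1 TEXT, col_2 INTEGER, col_3 REAL)")
        cur.executemany(
            "INSERT INTO dataset VALUES (?, ?, ?)",
            [("0", 0, 0.0), ("1", 1, 1.0), ("2", 2, 2.0), ("3", 3, 3.0)],
        )
        con.commit()
    return path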
| 63 | 1 |
from __future__ import annotations
import requests
valid_terms = set(
"approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports".split()
)
def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError
    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}
    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"]))
| 63 |
from __future__ import annotations

sieve = [True] * 1_000_001
i = 2
while i * i <= 1_000_000:
    if sieve[i]:
        for j in range(i * i, 1_000_001, i):
            sieve[j] = False
    i += 1


def is_prime(n: int) -> bool:
    return sieve[n]


def contains_an_even_digit(n: int) -> bool:
    return any(digit in "02468" for digit in str(n))


def find_circular_primes(limit: int = 1000000) -> list[int]:
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result


def count_circular_primes() -> int:
    return len(find_circular_primes())


if __name__ == "__main__":
    print(f"{len(find_circular_primes()) = }")
| 63 | 1 |
import torch
from diffusers import DiffusionPipeline
class CustomPipeline(DiffusionPipeline):  # class name reconstructed; the original was obfuscated
    """simple docstring"""

    def __init__(self, unet, scheduler) -> None:
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1
        model_output = self.unet(image, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)
        return result
| 63 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PATTERNS = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)
    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v
a : Optional[int] = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
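
    # An illustrative invocation (added). The script filename and the paths
    # below are placeholders, not files shipped alongside this code:
    #
    #   python convert_blenderbot_checkpoint.py \
    #       --src_path ./blenderbot-model.bin \
    #       --save_dir ./hf_blenderbot \
    #       --hf_config_json ./blenderbot-3b-config.json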
| 63 | 1 |
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self : str ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def UpperCAmelCase ( self : str ) -> List[Any]:
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2", revision="bf16", dtype=jnp.bfloat16,
        )
__UpperCAmelCase : Any = """A painting of a squirrel eating a burger"""
__UpperCAmelCase : Tuple = jax.device_count()
__UpperCAmelCase : List[Any] = num_samples * [prompt]
__UpperCAmelCase : str = sd_pipe.prepare_inputs(__lowercase )
__UpperCAmelCase : List[str] = replicate(__lowercase )
__UpperCAmelCase : Optional[int] = shard(__lowercase )
__UpperCAmelCase : Dict = jax.random.PRNGKey(0 )
__UpperCAmelCase : int = jax.random.split(__lowercase , jax.device_count() )
__UpperCAmelCase : Optional[Any] = sd_pipe(__lowercase , __lowercase , __lowercase , num_inference_steps=25 , jit=__lowercase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
__UpperCAmelCase : str = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__UpperCAmelCase : Optional[Any] = images[0, 253:256, 253:256, -1]
__UpperCAmelCase : Dict = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__UpperCAmelCase : Dict = jnp.array([0.4_238, 0.4_414, 0.4_395, 0.4_453, 0.4_629, 0.4_590, 0.4_531, 0.45_508, 0.4_512] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def UpperCAmelCase ( self : Union[str, Any] ) -> str:
__UpperCAmelCase : Union[str, Any] = """stabilityai/stable-diffusion-2"""
__UpperCAmelCase , __UpperCAmelCase : List[Any] = FlaxDPMSolverMultistepScheduler.from_pretrained(__lowercase , subfolder="""scheduler""" )
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
__lowercase , scheduler=__lowercase , revision="""bf16""" , dtype=jnp.bfloataa , )
__UpperCAmelCase : str = scheduler_params
__UpperCAmelCase : Tuple = """A painting of a squirrel eating a burger"""
__UpperCAmelCase : List[str] = jax.device_count()
__UpperCAmelCase : List[Any] = num_samples * [prompt]
__UpperCAmelCase : Optional[int] = sd_pipe.prepare_inputs(__lowercase )
__UpperCAmelCase : Dict = replicate(__lowercase )
__UpperCAmelCase : Optional[Any] = shard(__lowercase )
__UpperCAmelCase : Any = jax.random.PRNGKey(0 )
__UpperCAmelCase : Union[str, Any] = jax.random.split(__lowercase , jax.device_count() )
__UpperCAmelCase : List[Any] = sd_pipe(__lowercase , __lowercase , __lowercase , num_inference_steps=25 , jit=__lowercase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
__UpperCAmelCase : List[Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__UpperCAmelCase : List[Any] = images[0, 253:256, 253:256, -1]
__UpperCAmelCase : List[str] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__UpperCAmelCase : Union[str, Any] = jnp.array([0.4_336, 0.42_969, 0.4_453, 0.4_199, 0.4_297, 0.4_531, 0.4_434, 0.4_434, 0.4_297] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 63 |
def interpolation_search(sorted_collection: list[int], item: int) -> int | None:
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        # avoid division by zero during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        elif point < left:
            right = left
            left = point
        elif point > right:
            left = right
            right = point
        elif item < current_item:
            right = point - 1
        else:
            left = point + 1
    return None
def interpolation_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    # avoid division by zero during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None
    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, point)
    elif sorted_collection[point] > item:
        return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
    else:
        return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)
def __assert_sorted(collection: list[int]) -> bool:
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True
if __name__ == "__main__":
import sys
    debug = 0
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    if debug == 1:
try:
__assert_sorted(collection)
except ValueError:
sys.exit("Sequence must be ascending sorted to apply interpolation search")
    target = 67
    result = interpolation_search(collection, target)
if result is not None:
print(f"""{target} found at positions: {result}""")
else:
print("Not found")
| 63 | 1 |
def prime_sieve_eratosthenes(num):
    """Return all primes up to and including ``num`` via the sieve of Eratosthenes."""
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_num = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
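
# Editor's note (not part of the original file): the inner loop starts at p*p
# because smaller multiples of p were already crossed out by smaller primes, e.g.
#   prime_sieve_eratosthenes(10) -> [2, 3, 5, 7]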
| 63 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 63 | 1 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
"4S 3H 2C 7S 5H",
"9D 8H 2C 6S 7H",
"2D 6D 9D TH 7D",
"TC 8C 2S JH 6C",
"JH 8S TH AH QH",
"TS KS 5S 9S AC",
"KD 6S 9D TH AD",
"KS 8D 4D 9S 4S", # pair
"8C 4S KH JS 4D", # pair
"QH 8H KD JH 8S", # pair
"KC 4H KS 2H 8D", # pair
"KD 4S KC 3H 8S", # pair
"AH 8S AS KC JH", # pair
"3H 4C 4H 3S 2H", # 2 pairs
"5S 5D 2C KH KH", # 2 pairs
"3C KH 5D 5S KH", # 2 pairs
"AS 3C KH AD KH", # 2 pairs
"7C 7S 3S 7H 5S", # 3 of a kind
"7C 7S KH 2H 7H", # 3 of a kind
"AC KH QH AH AS", # 3 of a kind
"2H 4D 3C AS 5S", # straight (low ace)
"3C 5C 4C 2C 6H", # straight
"6S 8S 7S 5H 9H", # straight
"JS QS 9H TS KH", # straight
"QC KH TS JS AH", # straight (high ace)
"8C 9C 5C 3C TC", # flush
"3S 8S 9S 5S KS", # flush
"4C 5C 9C 8C KC", # flush
"JH 8H AH KH QH", # flush
"3D 2H 3H 2C 2D", # full house
"2H 2C 3S 3H 3D", # full house
"KH KC 3S 3H 3D", # full house
"JC 6H JS JD JH", # 4 of a kind
"JC 7H JS JD JH", # 4 of a kind
"JC KH JS JD JH", # 4 of a kind
"2S AS 4S 5S 3S", # straight flush (low ace)
"2D 6D 3D 4D 5D", # straight flush
"5C 6C 3C 7C 4C", # straight flush
"JH 9H TH KH QH", # straight flush
"JH AH TH KH QH", # royal flush (high ace straight flush)
)
TEST_COMPARE = (
("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"),
("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"),
("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"),
("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"),
("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"),
("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"),
("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"),
("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"),
("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"),
("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"),
("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"),
("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"),
("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"),
("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"),
("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"),
("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"),
("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"),
("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"),
("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"),
("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"),
("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"),
("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"),
("AH AD KS KC AC", "AH KD KH AC KC", "Win"),
("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"),
("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"),
("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"),
("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"),
("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"),
("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"),
("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"),
("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"),
)
TEST_FLUSH = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", True),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", False),
("AS 3S 4S 8S 2S", True),
)
TEST_STRAIGHT = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", False),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", True),
)
TEST_FIVE_HIGH_STRAIGHT = (
("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]),
("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]),
("JH QD KC AS TS", False, [14, 13, 12, 11, 10]),
("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
("JH AH TH KH QH", 0),
("JH 9H TH KH QH", 0),
("JC KH JS JD JH", 7),
("KH KC 3S 3H 3D", 6),
("8C 9C 5C 3C TC", 0),
("JS QS 9H TS KH", 0),
("7C 7S KH 2H 7H", 3),
("3C KH 5D 5S KH", 2),
("QH 8H KD JH 8S", 1),
("2D 6D 9D TH 7D", 0),
)
TEST_TYPES = (
("JH AH TH KH QH", 23),
("JH 9H TH KH QH", 22),
("JC KH JS JD JH", 21),
("KH KC 3S 3H 3D", 20),
("8C 9C 5C 3C TC", 19),
("JS QS 9H TS KH", 18),
("7C 7S KH 2H 7H", 17),
("3C KH 5D 5S KH", 16),
("QH 8H KD JH 8S", 15),
("2D 6D 9D TH 7D", 14),
)
def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected) -> None:
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected) -> None:
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values) -> None:
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected) -> None:
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected) -> None:
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected) -> None:
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected) -> None:
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted() -> None:
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight() -> None:
    # Test that five high straights are compared correctly.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight() -> None:
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project() -> None:
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_path = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands_path) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
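
# Editor's note: these tests are meant to be collected by pytest rather than run
# directly, e.g. `python -m pytest test_poker_hand.py` from the directory that
# contains sol1.py and poker_hands.txt (paths inferred from the relative import).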
| 63 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    """Fourth-order Improved Pseudo Linear Multistep (IPNDM) scheduler."""

    order = 1

    @register_to_config
    def __init__(
        self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None
    ):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        # linear multistep combination of the stored residuals (up to order 4)
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
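
# Editor's sketch (assumption, not part of the original file): a typical
# denoising loop driving this scheduler; `model` stands in for any module
# mapping (sample, t) to a predicted residual.
#
#   scheduler = IPNDMScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       residual = model(sample, t)
#       sample = scheduler.step(residual, t, sample).prev_sample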
| 63 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Union[str, Any] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : int = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
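
# Editor's note: with this lazy layout, importing the package stays cheap;
# e.g. `from transformers.models.llama import LlamaConfig` only materializes
# the configuration module, while touching `LlamaModel` imports the torch branch.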
| 63 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
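
# Editor's note: this module backs the `transformers-cli` console script, e.g.
#   transformers-cli env
# dispatches to the EnvironmentCommand registered above and prints version info.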
| 63 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
    "google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
    "google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
    "google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
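
# Editor's illustrative sketch (not part of the original file): constructing a
# narrower variant of the config; the keyword values are arbitrary examples.
def _mobilenet_v2_config_demo() -> None:
    config = MobileNetV2Config(depth_multiplier=0.75, image_size=160)
    assert config.model_type == "mobilenet_v2"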
| 63 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a : List[str] = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Dict = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[Any] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 63 | 1 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)

            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )
        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        pass
| 63 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Any = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 | 1 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
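
# Editor's sketch (assumption, not part of the original file): the intended
# round trip with these helpers, given a checkpoint on disk and an image
# tensor `x` of shape (batch, 3, H, W) scaled to [-1, 1]:
#
#   model = load_vqgan(torch.device("cpu"))
#   xrec = reconstruct_with_vqgan(x, model)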
| 63 |
def prime_sieve_eratosthenes(num):
    """Return all primes up to and including ``num`` via the sieve of Eratosthenes."""
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_num = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
| 63 | 1 |
import qiskit
def single_qubit_measure(qubits, classical_bits):
    """Apply an X (NOT) gate to two qubits and measure them."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f"Total count for various states are: {counts}")
| 63 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
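
# Editor's illustrative sketch (not part of the original file): a default GIT
# config nests a vision config automatically.
def _git_config_demo() -> None:
    config = GitConfig()
    assert config.vision_config.model_type == "git_vision_model"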
| 63 | 1 |
import functools


def mincost_tickets(days, costs):
    """Minimum cost to travel on every day in ``days`` with 1/7/30-day passes."""
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
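
# Editor's note: worked example (the classic "Minimum Cost For Tickets" problem):
#   mincost_tickets(days=[1, 4, 6, 7, 8, 20], costs=[2, 7, 15]) -> 11
# (a 1-day pass on day 1, a 7-day pass covering days 4-8, a 1-day pass on day 20).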
| 63 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
| 63 | 1 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
| 63 |
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal value for the player to move (maximizer or minimizer)."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")

    if depth == height:
        return scores[node_index]

    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
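
# Editor's note: for the scores above, height = log2(8) = 3 and the maximizer
# moves first, so minimax(0, 0, True, scores, height) evaluates to 65:
#   max(min(max(90, 23), max(6, 33)), min(max(21, 65), max(123, 34423))) = 65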
| 63 | 1 |
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 63 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"
}
class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
| 63 | 1 |
from __future__ import annotations

from collections.abc import Iterable
from typing import Any


class Node:
    def __init__(self, value: int | None = None) -> None:
        self.value = value
        self.parent: Node | None = None  # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f"{self.value}": (self.left, self.right)}, indent=1)


class BinarySearchTree:
    def __init__(self, root: Node | None = None) -> None:
        self.root = root

    def __str__(self) -> str:
        return str(self.root)

    def __reassign_nodes(self, node: Node, new_children: Node | None) -> None:
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right child
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node: Node) -> bool:
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self) -> bool:
        return self.root is None

    def __insert(self, value) -> None:
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values) -> None:
        for value in values:
            self.__insert(value)

    def search(self, value) -> Node | None:
        if self.empty():
            raise IndexError("Warning: Tree is empty! please use another.")
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node: Node | None = None) -> Node | None:
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node: Node | None = None) -> Node | None:
        if node is None:
            node = self.root
            if self.root is None:
                return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node

    def remove(self, value: int) -> None:
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                tmp_node = self.get_max(
                    node.left
                )  # Gets the max value of the left branch
                self.remove(tmp_node.value)  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure

    def preorder_traverse(self, node: Node | None) -> Iterable:
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None) -> Any:
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr: list, node: Node | None) -> None:
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def find_kth_smallest(self, k: int, node: Node) -> int:
        arr: list[int] = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]


def postorder(curr_node: Node | None):
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list


def binary_search_tree() -> None:
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)

    # Prints all the elements of the list in order traversal
    print(t)

    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")

    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")

    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore

    for i in testlist:
        t.remove(i)
        print(t)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
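# A minimal usage sketch for the traversal helpers above (an editor's illustration,
# not part of the original module; it assumes the BinarySearchTree defined in this file):
#
#     tree = BinarySearchTree()
#     tree.insert(8, 3, 6, 1, 10)
#     nodes_post = postorder(tree.root)            # children before their parent
#     third_smallest = tree.find_kth_smallest(3, tree.root)  # -> 6 for this input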
| 63 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a ( PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    pipeline_class = KandinskyVaaInpaintPipeline
    params = ['image_embeds', 'negative_image_embeds', 'image', 'mask_image']
    batch_params = [
        'image_embeds',
        'negative_image_embeds',
        'image',
        'mask_image',
    ]
    required_optional_params = [
        'generator',
        'height',
        'width',
        'latents',
        'guidance_scale',
        'num_inference_steps',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
a : List[Any] = False
    @property
    def text_embedder_hidden_size ( self ):
        return 32
    @property
    def time_input_dim ( self ):
        return 32
    @property
    def block_out_channels_a ( self ):
        return self.time_input_dim
    @property
    def time_embed_dim ( self ):
        return self.time_input_dim * 4
    @property
    def cross_attention_dim ( self ):
        return 100
    @property
    def dummy_unet ( self ):
        torch.manual_seed(0 )
        model_kwargs = {
"""in_channels""": 9,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
        model = UNetaDConditionModel(**model_kwargs )
        return model
@property
    def dummy_movq_kwargs ( self ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq ( self ):
        torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
        return model
    def get_dummy_components ( self ):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000 , beta_schedule="""linear""" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type="""epsilon""" , thresholding=False , )
        components = {
            """unet""": unet,
            """scheduler""": scheduler,
            """movq""": movq,
        }
        return components
    def get_dummy_inputs ( self , device , seed=0 ):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            device )
        # create init_image
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert("""RGB""" ).resize((256, 256) )
        # create mask
        mask = np.ones((64, 64) , dtype=np.float32 )
        mask[:32, :32] = 0
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """image""": init_image,
            """mask_image""": mask,
            """image_embeds""": image_embeds,
            """negative_image_embeds""": negative_image_embeds,
            """generator""": generator,
            """height""": 64,
            """width""": 64,
            """num_inference_steps""": 2,
            """guidance_scale""": 4.0,
            """output_type""": """np""",
        }
        return inputs
    def test_kandinsky_inpaint ( self ):
        device = """cpu"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f"""image.shape {image.shape}""" )
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.50_775_903, 0.49_527_195, 0.48_824_543, 0.50_192_237, 0.48_644_906, 0.49_373_814, 0.4_780_598, 0.47_234_827, 0.48_327_848] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
    def test_inference_batch_single_identical ( self ):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
"""simple docstring"""
    def tearDown ( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_inpaint ( self ):
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy""" )
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
        mask = np.ones((768, 768) , dtype=np.float32 )
        mask[:250, 250:-250] = 0
        prompt = """a hat"""
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyVaaInpaintPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-decoder-inpaint""" , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
        image_emb, zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
        output = pipeline(
            image=init_image , mask_image=mask , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=100 , height=768 , width=768 , output_type="""np""" , )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image , expected_image )
| 63 | 1 |
def average_absolute_deviation ( nums : list[int] ):
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("""List is empty""" )
    average = sum(nums ) / len(nums )  # Calculate the average
    return sum(abs(x - average ) for x in nums ) / len(nums )
if __name__ == "__main__":
import doctest
doctest.testmod()
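# Worked example (an editor's illustration, not in the original file). For nums = [1, 2, 3, 4]
# the average is 2.5, the absolute deviations are [1.5, 0.5, 0.5, 1.5], and their mean is 1.0:
#
#     assert average_absolute_deviation([1, 2, 3, 4]) == 1.0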
| 63 |
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )
default_cache_path = os.path.join(torch_cache_home, "transformers")
CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels ( objs : str=OBJECTS , attrs : str=ATTRIBUTES ):
    vg_classes = []
    with open(objs ) as f:
        for object in f.readlines():
            vg_classes.append(object.split(""",""" )[0].lower().strip() )
    vg_attrs = []
    with open(attrs ) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(""",""" )[0].lower().strip() )
    return vg_classes, vg_attrs
def load_checkpoint ( ckp_path ):
    r = OrderedDict()
    with open(ckp_path , """rb""" ) as f:
        ckp = pkl.load(f )["""model"""]
    for k in copy.deepcopy(list(ckp.keys() ) ):
        v = ckp.pop(k )
        if isinstance(v , np.ndarray ):
            v = torch.tensor(v )
        else:
            assert isinstance(v , torch.Tensor ), type(v )
        r[k] = v
    return r
class Config :
    """simple docstring"""
    _pointer : dict = {}
    def __init__( self , dictionary : dict , name : str = "root" , level : int = 0 ):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k )
            v = copy.deepcopy(v )
            if isinstance(v , dict ):
                v = Config(v , name=k , level=level + 1 )
            d[k] = v
            setattr(self , k , v )
        self._pointer = d
def __repr__( self : Optional[Any] ) -> Optional[int]:
return str(list((self._pointer.keys()) ) )
    def __setattr__( self , key , val ):
        self.__dict__[key] = val
        self.__dict__[key.split(""".""" )[-1]] = val
        levels = key.split(""".""" )
        last_level = len(levels ) - 1
        pointer = self._pointer
        if len(levels ) > 1:
            for i, l in enumerate(levels ):
                if hasattr(self , l ) and isinstance(getattr(self , l ) , Config ):
                    setattr(getattr(self , l ) , """.""".join(levels[i:] ) , val )
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]
def UpperCAmelCase ( self : Tuple ) -> Optional[int]:
return self._pointer
    def dump_yaml ( self , data , file_name ):
        with open(f"""{file_name}""" , """w""" ) as stream:
            dump(data , stream )
    def dump_json ( self , data , file_name ):
        with open(f"""{file_name}""" , """w""" ) as stream:
            json.dump(data , stream )
    @staticmethod
    def load_yaml ( config ):
        with open(config ) as stream:
            data = load(stream , Loader=Loader )
        return data
    def __str__( self ):
        t = """ """
        if self._name != "root":
            r = f"""{t * (self._level-1)}{self._name}:\n"""
        else:
            r = """"""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items() ):
            if isinstance(v , Config ):
                r += f"""{t * (self._level)}{v}\n"""
                self._level += 1
            else:
                r += f"""{t * (self._level)}{k}: {v} ({type(v ).__name__})\n"""
            self._level = level
        return r[:-1]
    @classmethod
    def from_pretrained ( cls , pretrained_model_name_or_path : str , **kwargs ):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        return cls(config_dict )
    @classmethod
    def get_config_dict ( cls , pretrained_model_name_or_path : str , **kwargs ):
        cache_dir = kwargs.pop("""cache_dir""" , None )
        force_download = kwargs.pop("""force_download""" , False )
        resume_download = kwargs.pop("""resume_download""" , False )
        proxies = kwargs.pop("""proxies""" , None )
        local_files_only = kwargs.pop("""local_files_only""" , False )
        if os.path.isdir(pretrained_model_name_or_path ):
            config_file = os.path.join(pretrained_model_name_or_path , CONFIG_NAME )
        elif os.path.isfile(pretrained_model_name_or_path ) or is_remote_url(pretrained_model_name_or_path ):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path , filename=CONFIG_NAME , use_cdn=False )
        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            config_file = Config.load_yaml(resolved_config_file )
        except EnvironmentError:
            msg = """Can't load config for"""
            raise EnvironmentError(msg )
        if resolved_config_file == config_file:
            print("""loading configuration file from path""" )
        else:
            print("""loading configuration file cache""" )
        return Config.load_yaml(resolved_config_file ), kwargs
def compare ( in_tensor ):
    out_tensor = torch.load("""dump.pt""" , map_location=in_tensor.device )
    na = in_tensor.numpy()
    nb = out_tensor.numpy()[0]
    print(na.shape , na[0, 0, :5] )
    print(nb.shape , nb[0, 0, :5] )
    assert np.allclose(na , nb , rtol=0.0_1 , atol=0.1 ), (
        f"""{sum([1 for x in np.isclose(na , nb , rtol=0.0_1 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %"""
        " element-wise mismatch"
    )
    raise Exception("""tensors are all good""" )
# Hugging face functions below
def is_remote_url ( url_or_filename ):
    parsed = urlparse(url_or_filename )
    return parsed.scheme in ("http", "https")
def hf_bucket_url ( model_id : str , filename : str , use_cdn=True ):
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = """/""" not in model_id
    if legacy_format:
        return f"""{endpoint}/{model_id}-{filename}"""
    else:
        return f"""{endpoint}/{model_id}/{filename}"""
def http_get ( url , temp_file , proxies=None , resume_size=0 , user_agent=None , ):
    ua = """python/{}""".format(sys.version.split()[0] )
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__ )
    if isinstance(user_agent , dict ):
        ua += "; " + "; ".join("""{}/{}""".format(k , v ) for k, v in user_agent.items() )
    elif isinstance(user_agent , str ):
        ua += "; " + user_agent
    headers = {"""user-agent""": ua}
    if resume_size > 0:
        headers["""Range"""] = """bytes=%d-""" % (resume_size,)
    response = requests.get(url , stream=True , proxies=proxies , headers=headers )
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("""Content-Length""" )
    total = resume_size + int(content_length ) if content_length is not None else None
    progress = tqdm(
        unit="""B""" , unit_scale=True , total=total , initial=resume_size , desc="""Downloading""" , )
    for chunk in response.iter_content(chunk_size=1024 ):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk ) )
            temp_file.write(chunk )
    progress.close()
def get_from_cache ( url , cache_dir=None , force_download=False , proxies=None , etag_timeout=10 , resume_download=False , user_agent=None , local_files_only=False , ):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir , Path ):
        cache_dir = str(cache_dir )
    os.makedirs(cache_dir , exist_ok=True )
    etag = None
    if not local_files_only:
        try:
            response = requests.head(url , allow_redirects=True , proxies=proxies , timeout=etag_timeout )
            if response.status_code == 200:
                etag = response.headers.get("""ETag""" )
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass
    filename = url_to_filename(url , etag )
    # get cache path to put the file
    cache_path = os.path.join(cache_dir , filename )
    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path ):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir ) , filename + """.*""" )
                if not file.endswith(""".json""" ) and not file.endswith(""".lock""" )
            ]
            if len(matching_files ) > 0:
                return os.path.join(cache_dir , matching_files[-1] )
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        """Cannot find the requested files in the cached path and outgoing traffic has been"""
                        """ disabled. To enable model look-ups and downloads online, set 'local_files_only'"""
                        """ to False.""" )
                return None
    # From now on, etag is not None.
    if os.path.exists(cache_path ) and not force_download:
        return cache_path
    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + """.lock"""
    with FileLock(lock_path ):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path ) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path
        if resume_download:
            incomplete_path = cache_path + """.incomplete"""
            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path , """a+b""" ) as f:
                    yield f
            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path ):
                resume_size = os.stat(incomplete_path ).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile , dir=cache_dir , delete=False )
            resume_size = 0
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                """%s not found in cache or force_download set to True, downloading to %s""" , url , temp_file.name , )
            http_get(
                url , temp_file , proxies=proxies , resume_size=resume_size , user_agent=user_agent , )
        os.replace(temp_file.name , cache_path )
        meta = {"""url""": url, """etag""": etag}
        meta_path = cache_path + """.json"""
        with open(meta_path , """w""" ) as meta_file:
            json.dump(meta , meta_file )
    return cache_path
def url_to_filename ( url , etag=None ):
    url_bytes = url.encode("""utf-8""" )
    url_hash = sha256(url_bytes )
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode("""utf-8""" )
        etag_hash = sha256(etag_bytes )
        filename += "." + etag_hash.hexdigest()
    if url.endswith(""".h5""" ):
        filename += ".h5"
    return filename
def cached_path ( url_or_filename , cache_dir=None , force_download=False , proxies=None , resume_download=False , user_agent=None , extract_compressed_file=False , force_extract=False , local_files_only=False , ):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename , Path ):
        url_or_filename = str(url_or_filename )
    if isinstance(cache_dir , Path ):
        cache_dir = str(cache_dir )
    if is_remote_url(url_or_filename ):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , user_agent=user_agent , local_files_only=local_files_only , )
    elif os.path.exists(url_or_filename ):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename ).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("""file {} not found""".format(url_or_filename ) )
    else:
        # Something unknown
        raise ValueError("""unable to parse {} as a URL or as a local path""".format(url_or_filename ) )
    if extract_compressed_file:
        if not is_zipfile(output_path ) and not tarfile.is_tarfile(output_path ):
            return output_path
        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path )
        output_extract_dir_name = output_file.replace(""".""" , """-""" ) + """-extracted"""
        output_path_extracted = os.path.join(output_dir , output_extract_dir_name )
        if os.path.isdir(output_path_extracted ) and os.listdir(output_path_extracted ) and not force_extract:
            return output_path_extracted
        # Prevent parallel extractions
        lock_path = output_path + """.lock"""
        with FileLock(lock_path ):
            shutil.rmtree(output_path_extracted , ignore_errors=True )
            os.makedirs(output_path_extracted )
            if is_zipfile(output_path ):
                with ZipFile(output_path , """r""" ) as zip_file:
                    zip_file.extractall(output_path_extracted )
                    zip_file.close()
            elif tarfile.is_tarfile(output_path ):
                tar_file = tarfile.open(output_path )
                tar_file.extractall(output_path_extracted )
                tar_file.close()
            else:
                raise EnvironmentError("""Archive format of {} could not be identified""".format(output_path ) )
        return output_path_extracted
    return output_path
def get_data ( query , delim="," ):
    assert isinstance(query , str )
    if os.path.isfile(query ):
        with open(query ) as f:
            data = eval(f.read() )
    else:
        req = requests.get(query )
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
        assert data is not None, "could not connect"
        try:
            data = eval(data )
        except Exception:
            data = data.split("""\n""" )
        req.close()
    return data
def get_image_from_url ( url ):
    response = requests.get(url )
    img = np.array(Image.open(BytesIO(response.content ) ) )
    return img
def load_frcnn_pkl_from_url ( url ):
    fn = url.split("""/""" )[-1]
    if fn not in os.listdir(os.getcwd() ):
        wget.download(url )
    with open(fn , """rb""" ) as stream:
        weights = pkl.load(stream )
    model = weights.pop("""model""" )
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v )
        if "running_var" in k:
            zero = torch.tensor([0] )
            k2 = k.replace("""running_var""" , """num_batches_tracked""" )
            new[k2] = zero
    return new
def get_demo_path ( ):
    print(f"""{os.path.abspath(os.path.join(PATH , os.pardir ) )}/demo.ipynb""" )
def img_tensorize ( im , input_format="RGB" ):
    assert isinstance(im , str )
    if os.path.isfile(im ):
        img = cv2.imread(im )
    else:
        img = get_image_from_url(im )
        assert img is not None, f"""could not connect to: {im}"""
    img = cv2.cvtColor(img , cv2.COLOR_BGR2RGB )
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img
def chunk ( images , batch=1 ):
    return (images[i : i + batch] for i in range(0 , len(images ) , batch ))
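# A small usage sketch for the helpers above (an editor's illustration, not part of the
# original module; it assumes objects.txt/attributes.txt sit next to this file as configured
# above, and "my_image.jpg" is a hypothetical local path):
#
#     objids, attrids = load_labels()         # Visual Genome class/attribute names
#     img = img_tensorize("my_image.jpg")     # BGR array when input_format == "RGB"
#     for batch in chunk([img], batch=1):     # iterate over mini-batches of images
#         pass                                # feed each batch to the detection model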
| 63 | 1 |
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , image_size=32 , num_channels=3 , num_stages=4 , hidden_sizes=[10, 20, 30, 40] , depths=[2, 2, 3, 2] , is_training=True , use_labels=True , intermediate_size=37 , hidden_act="gelu" , num_labels=10 , initializer_range=0.02 , out_features=["stage2", "stage3", "stage4"] , out_indices=[2, 3, 4] , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
    def prepare_config_and_inputs ( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config ( self ):
        return ConvNextVaConfig(
            num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=False , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
    def create_and_check_model ( self , config , pixel_values , labels ):
        model = ConvNextVaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification ( self , config , pixel_values , labels ):
        model = ConvNextVaForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_backbone ( self , config , pixel_values , labels ):
        model = ConvNextVaBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def prepare_config_and_inputs_for_common ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
    def prepare_config_and_inputs_with_labels ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values, """labels""": labels}
        return config, inputs_dict
@require_torch
class a ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
a : int = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
a : List[Any] = (
{'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
a : List[str] = False
a : Union[str, Any] = False
a : Optional[Any] = False
a : Optional[Any] = False
a : Tuple = False
    def setUp ( self ):
        self.model_tester = ConvNextVaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ConvNextVaConfig , has_text_modality=False , hidden_size=37 )
def UpperCAmelCase ( self : int ) -> Dict:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase ( self : Any ) -> str:
return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
def UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
def UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
def UpperCAmelCase ( self : Tuple ) -> str:
pass
    def test_training ( self ):
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES ),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES ),
            ]:
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_training_gradient_checkpointing ( self ):
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True
            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES ), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES )]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
def UpperCAmelCase ( self : str ) -> List[Any]:
__UpperCAmelCase , __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : str = model_class(__lowercase )
__UpperCAmelCase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : List[str] = [*signature.parameters.keys()]
__UpperCAmelCase : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __lowercase )
def UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def UpperCAmelCase ( self : Dict ) -> List[Any]:
def check_hidden_states_output(__lowercase : List[Any] , __lowercase : Optional[Any] , __lowercase : Dict ):
__UpperCAmelCase : str = model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
__UpperCAmelCase : str = model(**self._prepare_for_class(__lowercase , __lowercase ) )
__UpperCAmelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__UpperCAmelCase : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(__lowercase ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Dict = True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase : Optional[int] = True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
def UpperCAmelCase ( self : Dict ) -> str:
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowercase )
@slow
def UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Union[str, Any] = ConvNextVaModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
def lowerCamelCase__ ( ):
__UpperCAmelCase : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None
@slow
def UpperCAmelCase ( self : Dict ) -> int:
__UpperCAmelCase : Optional[int] = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(__lowercase )
__UpperCAmelCase : int = self.default_image_processor
__UpperCAmelCase : Optional[int] = prepare_img()
__UpperCAmelCase : List[Any] = preprocessor(images=__lowercase , return_tensors="""pt""" ).to(__lowercase )
# forward pass
with torch.no_grad():
__UpperCAmelCase : Union[str, Any] = model(**__lowercase )
# verify the logits
__UpperCAmelCase : Dict = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __lowercase )
__UpperCAmelCase : int = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(__lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1e-4 ) )
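# A minimal inference sketch (an editor's illustration, separate from the test suite above;
# it assumes the public transformers pipeline API with the same checkpoint used in the test):
#
#     from transformers import pipeline
#     classifier = pipeline("image-classification", model="facebook/convnextv2-tiny-1k-224")
#     predictions = classifier("./tests/fixtures/tests_samples/COCO/000000039769.png")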
| 63 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester ( unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs ( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = RobertaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class a ( FlaxModelTesterMixin , unittest.TestCase ):
"""simple docstring"""
a : Optional[Any] = True
a : List[Any] = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp ( self ):
        self.model_tester = FlaxRobertaModelTester(self )
    @slow
    def test_model_from_pretrained ( self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("""roberta-base""" , from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
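# Minimal usage sketch (an editor's illustration using the standard transformers Flax API,
# not part of this test file):
#
#     from transformers import AutoTokenizer, FlaxRobertaModel
#     tokenizer = AutoTokenizer.from_pretrained("roberta-base")
#     model = FlaxRobertaModel.from_pretrained("roberta-base")
#     inputs = tokenizer("Hello world", return_tensors="np")
#     last_hidden_state = model(**inputs).last_hidden_state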
| 63 | 1 |
def largest_square_area_in_matrix_top_down_approach ( rows : int , cols : int , mat : list[list[int]] ):
    def update_area_of_max_square ( row : int , col : int ) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0
        right = update_area_of_max_square(row , col + 1 )
        diagonal = update_area_of_max_square(row + 1 , col + 1 )
        down = update_area_of_max_square(row + 1 , col )
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down] )
            largest_square_area[0] = max(largest_square_area[0] , sub_problem_sol )
            return sub_problem_sol
        else:
            return 0
    largest_square_area = [0]
    update_area_of_max_square(0 , 0 )
    return largest_square_area[0]
def largest_square_area_in_matrix_top_down_approach_with_dp ( rows : int , cols : int , mat : list[list[int]] ):
    def update_area_of_max_square_using_dp_array(
        row : int , col : int , dp_array : list[list[int]] ) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]
        right = update_area_of_max_square_using_dp_array(row , col + 1 , dp_array )
        diagonal = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , dp_array )
        down = update_area_of_max_square_using_dp_array(row + 1 , col , dp_array )
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down] )
            largest_square_area[0] = max(largest_square_area[0] , sub_problem_sol )
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0
    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows )]
    update_area_of_max_square_using_dp_array(0 , 0 , dp_array )
    return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up ( rows : int , cols : int , mat : list[list[int]] ):
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1 )]
    largest_square_area = 0
    for row in range(rows - 1 , -1 , -1 ):
        for col in range(cols - 1 , -1 , -1 ):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right , diagonal , bottom )
                largest_square_area = max(dp_array[row][col] , largest_square_area )
            else:
                dp_array[row][col] = 0
    return largest_square_area
def largest_square_area_in_matrix_bottom_up_space_optimization ( rows : int , cols : int , mat : list[list[int]] ):
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1 , -1 , -1 ):
        for col in range(cols - 1 , -1 , -1 ):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right , diagonal , bottom )
                largest_square_area = max(current_row[col] , largest_square_area )
            else:
                current_row[col] = 0
        next_row = current_row
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
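# Worked example (an editor's illustration, not in the original file): all four approaches
# agree; for a 2x2 matrix of ones the largest all-ones square has side length 2, which is
# the value these functions return. The memoized and bottom-up variants trade O(rows*cols)
# memory for avoiding the exponential recursion of the first approach.
#
#     mat = [[1, 1], [1, 1]]
#     assert largest_square_area_in_matrix_top_down_approach(2, 2, mat) == 2
#     assert largest_square_area_in_matrix_top_down_approach_with_dp(2, 2, mat) == 2
#     assert largest_square_area_in_matrix_bottom_up(2, 2, mat) == 2
#     assert largest_square_area_in_matrix_bottom_up_space_optimization(2, 2, mat) == 2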
| 63 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)
class SchedulerType ( Enum ):
    """simple docstring"""
    LINEAR = 'linear'
    COSINE = 'cosine'
    COSINE_WITH_RESTARTS = 'cosine_with_restarts'
    POLYNOMIAL = 'polynomial'
    CONSTANT = 'constant'
    CONSTANT_WITH_WARMUP = 'constant_with_warmup'
    PIECEWISE_CONSTANT = 'piecewise_constant'
def get_constant_schedule ( optimizer : Optimizer , last_epoch : int = -1 ):
    return LambdaLR(optimizer , lambda _: 1 , last_epoch=last_epoch )
def get_constant_schedule_with_warmup ( optimizer : Optimizer , num_warmup_steps : int , last_epoch : int = -1 ):
    def lr_lambda(current_step : int ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1.0 , num_warmup_steps ) )
        return 1.0
    return LambdaLR(optimizer , lr_lambda , last_epoch=last_epoch )
def get_piecewise_constant_schedule ( optimizer : Optimizer , step_rules : str , last_epoch : int = -1 ):
    rules_dict = {}
    rule_list = step_rules.split(""",""" )
    for rule_str in rule_list[:-1]:
        value_str, lr_str = rule_str.split(""":""" )
        steps = int(value_str )
        lr_multiple = float(lr_str )
        rules_dict[steps] = lr_multiple
    last_lr_multiple = float(rule_list[-1] )
    def create_rules_function(rules_dict , last_lr_multiple ):
        def rule_func(steps : int ) -> float:
            sorted_steps = sorted(rules_dict.keys() )
            for i, sorted_step in enumerate(sorted_steps ):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple
        return rule_func
    rules_func = create_rules_function(rules_dict , last_lr_multiple )
    return LambdaLR(optimizer , rules_func , last_epoch=last_epoch )
def get_linear_schedule_with_warmup ( optimizer , num_warmup_steps , num_training_steps , last_epoch=-1 ):
    def lr_lambda(current_step : int ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        return max(
            0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_cosine_schedule_with_warmup ( optimizer : Optimizer , num_warmup_steps : int , num_training_steps : int , num_cycles : float = 0.5 , last_epoch : int = -1 ):
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(num_cycles ) * 2.0 * progress )) )
    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_cosine_with_hard_restarts_schedule_with_warmup ( optimizer : Optimizer , num_warmup_steps : int , num_training_steps : int , num_cycles : int = 1 , last_epoch : int = -1 ):
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        if progress >= 1.0:
            return 0.0
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles ) * progress) % 1.0) )) )
    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_polynomial_decay_schedule_with_warmup ( optimizer , num_warmup_steps , num_training_steps , lr_end=1e-7 , power=1.0 , last_epoch=-1 ):
    lr_init = optimizer.defaults["""lr"""]
    if not (lr_init > lr_end):
        raise ValueError(f"""lr_end ({lr_end}) must be smaller than initial lr ({lr_init})""" )
    def lr_lambda(current_step : int ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init
    return LambdaLR(optimizer , lr_lambda , last_epoch )
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler ( name : Union[str, SchedulerType] , optimizer : Optimizer , step_rules : Optional[str] = None , num_warmup_steps : Optional[int] = None , num_training_steps : Optional[int] = None , num_cycles : int = 1 , power : float = 1.0 , last_epoch : int = -1 , ):
    name = SchedulerType(name )
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer , last_epoch=last_epoch )
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer , step_rules=step_rules , last_epoch=last_epoch )
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""" )
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer , num_warmup_steps=num_warmup_steps , last_epoch=last_epoch )
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""" )
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , num_cycles=num_cycles , last_epoch=last_epoch , )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , power=power , last_epoch=last_epoch , )
    return schedule_func(
        optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , last_epoch=last_epoch )
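# Usage sketch (an editor's illustration of the dispatcher above; the model, optimizer,
# and step counts are placeholder values):
#
#     import torch
#     model = torch.nn.Linear(2, 2)
#     optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
#     scheduler = get_scheduler(
#         "cosine", optimizer, num_warmup_steps=100, num_training_steps=1000
#     )
#     for _ in range(1000):
#         optimizer.step()
#         scheduler.step()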
| 63 | 1 |
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = "http://www.mocksite.com/file1.txt"
CONTENT = "\"text\": [\"foo\", \"foo\"]"
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class MockResponse :
    """simple docstring"""
    status_code = 200
    headers = {'Content-Length': '100'}
    cookies = {}
    def iter_content ( self , **kwargs ):
        return [bytes(CONTENT , """utf-8""" )]
def mock_request ( *args , **kwargs ):
    return MockResponse()
@pytest.mark.parametrize("""urls_type""" , [str, list, dict] )
def test_download_manager_download ( urls_type , tmp_path , monkeypatch ):
    import requests
    monkeypatch.setattr(requests , """request""" , mock_request )
    url = URL
    if issubclass(urls_type , str ):
        urls = url
    elif issubclass(urls_type , list ):
        urls = [url]
    elif issubclass(urls_type , dict ):
        urls = {"""train""": url}
    dataset_name = """dummy"""
    cache_subdir = """downloads"""
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root , cache_subdir ) , use_etag=False , )
    dl_manager = DownloadManager(dataset_name=dataset_name , download_config=download_config )
    downloaded_paths = dl_manager.download(urls )
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls , str ):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls , dict ):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths , input_urls ):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path )
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(""".json""" )
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text() )
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("""paths_type""" , [str, list, dict] )
def lowerCamelCase__ ( __lowerCamelCase : List[str] , __lowerCamelCase : str , __lowerCamelCase : Dict ):
__UpperCAmelCase : str = str(__lowerCamelCase )
if issubclass(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : Optional[Any] = filename
elif issubclass(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : Tuple = [filename]
elif issubclass(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : List[Any] = {"""train""": filename}
__UpperCAmelCase : Tuple = """dummy"""
__UpperCAmelCase : Any = xz_file.parent
__UpperCAmelCase : List[str] = """extracted"""
__UpperCAmelCase : Tuple = DownloadConfig(
cache_dir=__lowerCamelCase , use_etag=__lowerCamelCase , )
__UpperCAmelCase : Dict = DownloadManager(dataset_name=__lowerCamelCase , download_config=__lowerCamelCase )
__UpperCAmelCase : str = dl_manager.extract(__lowerCamelCase )
__UpperCAmelCase : str = paths
for extracted_paths in [extracted_paths]:
if isinstance(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : Any = [extracted_paths]
__UpperCAmelCase : Tuple = [paths]
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
assert "train" in extracted_paths.keys()
__UpperCAmelCase : Union[str, Any] = extracted_paths.values()
__UpperCAmelCase : int = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(__lowerCamelCase , __lowerCamelCase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
__UpperCAmelCase : Union[str, Any] = Path(__lowerCamelCase )
__UpperCAmelCase : Any = extracted_path.parts
assert parts[-1] == hash_url_to_filename(__lowerCamelCase , etag=__lowerCamelCase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
__UpperCAmelCase : str = extracted_path.read_text()
__UpperCAmelCase : Optional[int] = text_file.read_text()
assert extracted_file_content == expected_file_content
def lowerCamelCase__ ( __lowerCamelCase : List[str] , __lowerCamelCase : Any ):
assert path.endswith(""".jsonl""" )
for num_items, line in enumerate(__lowerCamelCase , start=1 ):
__UpperCAmelCase : str = json.loads(line.decode("""utf-8""" ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize("""archive_jsonl""" , ["""tar_jsonl_path""", """zip_jsonl_path"""] )
def lowerCamelCase__ ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict ):
__UpperCAmelCase : Any = request.getfixturevalue(__lowerCamelCase )
__UpperCAmelCase : int = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(__lowerCamelCase ) , start=1 ):
_test_jsonl(__lowerCamelCase , __lowerCamelCase )
assert num_jsonl == 2
@pytest.mark.parametrize("""archive_nested_jsonl""" , ["""tar_nested_jsonl_path""", """zip_nested_jsonl_path"""] )
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] , __lowerCamelCase : Any ):
__UpperCAmelCase : str = request.getfixturevalue(__lowerCamelCase )
__UpperCAmelCase : List[Any] = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(__lowerCamelCase ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(__lowerCamelCase ) , start=1 ):
_test_jsonl(__lowerCamelCase , __lowerCamelCase )
assert num_tar == 1
assert num_jsonl == 2
def lowerCamelCase__ ( __lowerCamelCase : Dict ):
__UpperCAmelCase : Union[str, Any] = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(__lowerCamelCase ) , start=1 ):
assert os.path.basename(__lowerCamelCase ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 63 |
from math import pi, sqrt
def gamma(num : float ) -> float:
    if num <= 0:
        raise ValueError("""math domain error""" )
    if num > 171.5:
        raise OverflowError("""math range error""" )
    elif num - int(num ) not in (0, 0.5):
        raise NotImplementedError("""num must be an integer or a half-integer""" )
    elif num == 0.5:
        # gamma(0.5) is sqrt(pi)
        return sqrt(pi )
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def test_gamma() -> None:
    assert gamma(0.5 ) == sqrt(pi )
    assert gamma(1 ) == 1.0
    assert gamma(2 ) == 1.0
if __name__ == "__main__":
    from doctest import testmod
    testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        print(f"""gamma({num}) = {gamma(num)}""")
        print("\nEnter 0 to exit...")
| 63 | 1 |
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase__ ( __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : str ):
def get_masked_lm_array(__lowerCamelCase : str ):
__UpperCAmelCase : str = f"""masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
__UpperCAmelCase : Optional[int] = tf.train.load_variable(__lowerCamelCase , __lowerCamelCase )
if "kernel" in name:
__UpperCAmelCase : Optional[int] = array.transpose()
return torch.from_numpy(__lowerCamelCase )
def get_encoder_array(__lowerCamelCase : str ):
__UpperCAmelCase : Optional[int] = f"""encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
__UpperCAmelCase : Union[str, Any] = tf.train.load_variable(__lowerCamelCase , __lowerCamelCase )
if "kernel" in name:
__UpperCAmelCase : int = array.transpose()
return torch.from_numpy(__lowerCamelCase )
def get_encoder_layer_array(__lowerCamelCase : int , __lowerCamelCase : str ):
__UpperCAmelCase : List[Any] = f"""encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
__UpperCAmelCase : Union[str, Any] = tf.train.load_variable(__lowerCamelCase , __lowerCamelCase )
if "kernel" in name:
__UpperCAmelCase : List[str] = array.transpose()
return torch.from_numpy(__lowerCamelCase )
def get_encoder_attention_layer_array(__lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : Optional[Any] ):
__UpperCAmelCase : Union[str, Any] = f"""encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
__UpperCAmelCase : Tuple = tf.train.load_variable(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : List[Any] = array.reshape(__lowerCamelCase )
if "kernel" in name:
__UpperCAmelCase : Any = array.transpose()
return torch.from_numpy(__lowerCamelCase )
print(f"""Loading model based on config from {config_path}...""" )
__UpperCAmelCase : List[Any] = BertConfig.from_json_file(__lowerCamelCase )
__UpperCAmelCase : List[Any] = BertForMaskedLM(__lowerCamelCase )
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
__UpperCAmelCase : BertLayer = model.bert.encoder.layer[layer_index]
# Self-attention
__UpperCAmelCase : BertSelfAttention = layer.attention.self
__UpperCAmelCase : Union[str, Any] = get_encoder_attention_layer_array(
__lowerCamelCase , """_query_dense/kernel""" , self_attn.query.weight.data.shape )
__UpperCAmelCase : List[Any] = get_encoder_attention_layer_array(
__lowerCamelCase , """_query_dense/bias""" , self_attn.query.bias.data.shape )
__UpperCAmelCase : Any = get_encoder_attention_layer_array(
__lowerCamelCase , """_key_dense/kernel""" , self_attn.key.weight.data.shape )
__UpperCAmelCase : int = get_encoder_attention_layer_array(
__lowerCamelCase , """_key_dense/bias""" , self_attn.key.bias.data.shape )
__UpperCAmelCase : Any = get_encoder_attention_layer_array(
__lowerCamelCase , """_value_dense/kernel""" , self_attn.value.weight.data.shape )
__UpperCAmelCase : int = get_encoder_attention_layer_array(
__lowerCamelCase , """_value_dense/bias""" , self_attn.value.bias.data.shape )
# Self-attention Output
__UpperCAmelCase : BertSelfOutput = layer.attention.output
__UpperCAmelCase : Optional[Any] = get_encoder_attention_layer_array(
__lowerCamelCase , """_output_dense/kernel""" , self_output.dense.weight.data.shape )
__UpperCAmelCase : List[str] = get_encoder_attention_layer_array(
__lowerCamelCase , """_output_dense/bias""" , self_output.dense.bias.data.shape )
__UpperCAmelCase : List[str] = get_encoder_layer_array(__lowerCamelCase , """_attention_layer_norm/gamma""" )
__UpperCAmelCase : Optional[Any] = get_encoder_layer_array(__lowerCamelCase , """_attention_layer_norm/beta""" )
# Intermediate
__UpperCAmelCase : BertIntermediate = layer.intermediate
__UpperCAmelCase : int = get_encoder_layer_array(__lowerCamelCase , """_intermediate_dense/kernel""" )
__UpperCAmelCase : Union[str, Any] = get_encoder_layer_array(__lowerCamelCase , """_intermediate_dense/bias""" )
# Output
__UpperCAmelCase : BertOutput = layer.output
__UpperCAmelCase : Optional[int] = get_encoder_layer_array(__lowerCamelCase , """_output_dense/kernel""" )
__UpperCAmelCase : int = get_encoder_layer_array(__lowerCamelCase , """_output_dense/bias""" )
__UpperCAmelCase : Dict = get_encoder_layer_array(__lowerCamelCase , """_output_layer_norm/gamma""" )
__UpperCAmelCase : Dict = get_encoder_layer_array(__lowerCamelCase , """_output_layer_norm/beta""" )
# Embeddings
__UpperCAmelCase : Union[str, Any] = get_encoder_array("""_position_embedding_layer/embeddings""" )
__UpperCAmelCase : Tuple = get_encoder_array("""_type_embedding_layer/embeddings""" )
__UpperCAmelCase : List[str] = get_encoder_array("""_embedding_norm_layer/gamma""" )
__UpperCAmelCase : Dict = get_encoder_array("""_embedding_norm_layer/beta""" )
# LM Head
__UpperCAmelCase : List[str] = model.cls.predictions.transform
__UpperCAmelCase : Tuple = get_masked_lm_array("""dense/kernel""" )
__UpperCAmelCase : List[str] = get_masked_lm_array("""dense/bias""" )
__UpperCAmelCase : Dict = get_masked_lm_array("""layer_norm/gamma""" )
__UpperCAmelCase : Optional[Any] = get_masked_lm_array("""layer_norm/beta""" )
__UpperCAmelCase : Optional[Any] = get_masked_lm_array("""embedding_table""" )
# Pooling
__UpperCAmelCase : Dict = BertPooler(config=__lowerCamelCase )
__UpperCAmelCase : BertPooler = get_encoder_array("""_pooler_layer/kernel""" )
__UpperCAmelCase : BertPooler = get_encoder_array("""_pooler_layer/bias""" )
# Export final model
model.save_pretrained(__lowerCamelCase )
# Integration test - should load without any errors ;)
__UpperCAmelCase : List[str] = BertForMaskedLM.from_pretrained(__lowerCamelCase )
print(new_model.eval() )
print("""Model conversion was done sucessfully!""" )
if __name__ == "__main__":
a : str = argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model.",
)
a : Union[str, Any] = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
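# Example invocation (added; all paths are placeholders and the script filename
# is an assumption about how this file is saved):
#     python convert_token_dropping_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path /path/to/tf_ckpt \
#         --bert_config_file /path/to/bert_config.json \
#         --pytorch_dump_path ./converted_model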
| 63 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    """simple docstring"""
    data : int
    left : Node | None = None
    right : Node | None = None
def make_tree() -> Node | None:
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    return tree
def preorder(root : Node | None ):
    return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def postorder(root : Node | None ):
    return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def inorder(root : Node | None ):
    return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def height(root : Node | None ):
    return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def level_order(root : Node | None ):
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root] )
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data )
        if node.left:
            process_queue.append(node.left )
        if node.right:
            process_queue.append(node.right )
    return output
def get_nodes_from_left_to_right(root : Node | None , level : int ):
    output: list[Any] = []
    def populate_output(root : Node | None , level : int ) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.left , level - 1 )
            populate_output(root.right , level - 1 )
    populate_output(root , level )
    return output
def get_nodes_from_right_to_left(root : Node | None , level : int ):
    output: list[Any] = []
    def populate_output(root : Node | None , level : int ) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.right , level - 1 )
            populate_output(root.left , level - 1 )
    populate_output(root , level )
    return output
def zigzag(root : Node | None ):
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root )
    for h in range(1 , height_tree + 1 ):
        if not flag:
            output.append(get_nodes_from_left_to_right(root , h ) )
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root , h ) )
            flag = 0
    return output
def main() -> None:  # Main function for testing.
    tree = make_tree()
    print(f"""In-order Traversal: {inorder(tree )}""" )
    print(f"""Pre-order Traversal: {preorder(tree )}""" )
    print(f"""Post-order Traversal: {postorder(tree )}""" , """\n""" )
    print(f"""Height of Tree: {height(tree )}""" , """\n""" )
    print("""Complete Level Order Traversal: """ )
    print(level_order(tree ) , """\n""" )
    print("""Level-wise order Traversal: """ )
    for level in range(1 , height(tree ) + 1 ):
        print(f"""Level {level}:""" , get_nodes_from_left_to_right(tree , level=level ) )
    print("""\nZigZag order Traversal: """ )
    print(zigzag(tree ) )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
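# Expected output sketch (added; follows from the tree built by make_tree:
# 1 at the root, 2 and 3 as its children, 4 and 5 under 2):
#   In-order:    [4, 2, 5, 1, 3]
#   Pre-order:   [1, 2, 4, 5, 3]
#   Post-order:  [4, 5, 2, 3, 1]
#   Level order: [1, 2, 3, 4, 5]
#   ZigZag:      [[1], [3, 2], [4, 5]]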
| 63 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
a : List[Any] = logging.get_logger(__name__)
a : str = {
"openai/imagegpt-small": "",
"openai/imagegpt-medium": "",
"openai/imagegpt-large": "",
}
class a ( lowercase__ ):
"""simple docstring"""
a : Any = 'imagegpt'
a : Optional[Any] = ['past_key_values']
a : Tuple = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : Union[str, Any] , __lowercase : Dict=512 + 1 , __lowercase : int=32 * 32 , __lowercase : List[str]=512 , __lowercase : Dict=24 , __lowercase : Optional[int]=8 , __lowercase : Tuple=None , __lowercase : Tuple="quick_gelu" , __lowercase : int=0.1 , __lowercase : Union[str, Any]=0.1 , __lowercase : Optional[int]=0.1 , __lowercase : List[Any]=1e-5 , __lowercase : Optional[Any]=0.02 , __lowercase : Union[str, Any]=True , __lowercase : Optional[int]=True , __lowercase : Optional[int]=False , __lowercase : Union[str, Any]=False , __lowercase : Any=False , **__lowercase : Optional[Any] , ) -> List[Any]:
__UpperCAmelCase : Tuple = vocab_size
__UpperCAmelCase : Union[str, Any] = n_positions
__UpperCAmelCase : int = n_embd
__UpperCAmelCase : int = n_layer
__UpperCAmelCase : Tuple = n_head
__UpperCAmelCase : Tuple = n_inner
__UpperCAmelCase : Optional[Any] = activation_function
__UpperCAmelCase : Tuple = resid_pdrop
__UpperCAmelCase : Optional[int] = embd_pdrop
__UpperCAmelCase : str = attn_pdrop
__UpperCAmelCase : int = layer_norm_epsilon
__UpperCAmelCase : List[str] = initializer_range
__UpperCAmelCase : List[str] = scale_attn_weights
__UpperCAmelCase : int = use_cache
__UpperCAmelCase : Optional[int] = scale_attn_by_inverse_layer_idx
__UpperCAmelCase : Optional[int] = reorder_and_upcast_attn
__UpperCAmelCase : Optional[int] = tie_word_embeddings
super().__init__(tie_word_embeddings=__lowercase , **__lowercase )
class a ( lowercase__ ):
"""simple docstring"""
@property
def UpperCAmelCase ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
] )
def UpperCAmelCase ( self : Union[str, Any] , __lowercase : "FeatureExtractionMixin" , __lowercase : int = 1 , __lowercase : int = -1 , __lowercase : bool = False , __lowercase : Optional["TensorType"] = None , __lowercase : int = 3 , __lowercase : int = 32 , __lowercase : int = 32 , ) -> Mapping[str, Any]:
__UpperCAmelCase : str = self._generate_dummy_images(__lowercase , __lowercase , __lowercase , __lowercase )
__UpperCAmelCase : Any = dict(preprocessor(images=__lowercase , return_tensors=__lowercase ) )
return inputs
| 63 |
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
a : Optional[int] = GPTSanJapaneseTokenizer
a : Optional[Any] = False
a : List[str] = {'do_clean_text': False, 'add_prefix_space': False}
def UpperCAmelCase ( self : Tuple ) -> Any:
super().setUp()
# fmt: off
__UpperCAmelCase : Tuple = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
# fmt: on
__UpperCAmelCase : Dict = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
__UpperCAmelCase : Dict = {"""unk_token""": """<unk>"""}
__UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.emoji_file , """w""" ) as emoji_writer:
emoji_writer.write(json.dumps(__lowercase ) )
def UpperCAmelCase ( self : Tuple , **__lowercase : int ) -> Any:
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__lowercase )
def UpperCAmelCase ( self : str , __lowercase : Union[str, Any] ) -> Any:
__UpperCAmelCase : Any = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
__UpperCAmelCase : int = """こんにちは、世界。 \nこんばんは、世界。😀"""
return input_text, output_text
def UpperCAmelCase ( self : List[Any] , __lowercase : Optional[int] ) -> List[Any]:
__UpperCAmelCase , __UpperCAmelCase : int = self.get_input_output_texts(__lowercase )
__UpperCAmelCase : Tuple = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
__UpperCAmelCase : Dict = tokenizer.decode(__lowercase , clean_up_tokenization_spaces=__lowercase )
return text, ids
def UpperCAmelCase ( self : int ) -> Optional[Any]:
pass # TODO add if relevant
def UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
pass # TODO add if relevant
def UpperCAmelCase ( self : Dict ) -> Tuple:
pass # TODO add if relevant
def UpperCAmelCase ( self : str ) -> Tuple:
__UpperCAmelCase : List[str] = self.get_tokenizer()
# Testing tokenization
__UpperCAmelCase : int = """こんにちは、世界。 こんばんは、㔺界。"""
__UpperCAmelCase : Dict = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
__UpperCAmelCase : Optional[Any] = tokenizer.tokenize(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
# Testing conversion to ids without special tokens
__UpperCAmelCase : List[str] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
__UpperCAmelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
# Testing conversion to ids with special tokens
__UpperCAmelCase : List[Any] = tokens + [tokenizer.unk_token]
__UpperCAmelCase : str = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
__UpperCAmelCase : Any = tokenizer.convert_tokens_to_ids(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
def UpperCAmelCase ( self : Tuple ) -> Dict:
__UpperCAmelCase : int = self.get_tokenizer()
# Testing tokenization
__UpperCAmelCase : Tuple = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
__UpperCAmelCase : int = """こんにちは、、、、世界。こんばんは、、、、世界。"""
__UpperCAmelCase : Tuple = tokenizer.encode(__lowercase )
__UpperCAmelCase : int = tokenizer.decode(__lowercase )
self.assertEqual(__lowercase , __lowercase )
@slow
def UpperCAmelCase ( self : int ) -> Optional[int]:
__UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
__UpperCAmelCase : List[Any] = """こんにちは、世界。"""
__UpperCAmelCase : Optional[int] = """こんばんは、㔺界。😀"""
__UpperCAmelCase : List[Any] = """こんにちは、世界。こんばんは、世界。😀"""
__UpperCAmelCase : List[str] = tokenizer.encode(prefix_text + input_text )
__UpperCAmelCase : List[Any] = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
__UpperCAmelCase : Any = tokenizer.encode(__lowercase , prefix_text=__lowercase )
__UpperCAmelCase : Optional[int] = tokenizer.decode(__lowercase )
__UpperCAmelCase : Any = tokenizer.decode(__lowercase )
__UpperCAmelCase : Optional[Any] = tokenizer.decode(__lowercase )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
@slow
def UpperCAmelCase ( self : Any ) -> str:
__UpperCAmelCase : int = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
__UpperCAmelCase : int = """こんにちは、世界。"""
__UpperCAmelCase : List[Any] = """こんばんは、㔺界。😀"""
__UpperCAmelCase : Union[str, Any] = len(tokenizer.encode(__lowercase ) ) - 2
__UpperCAmelCase : int = len(tokenizer.encode(__lowercase ) ) - 2
__UpperCAmelCase : List[Any] = [1] + [0] * (len_prefix + len_text + 1)
__UpperCAmelCase : Union[str, Any] = [1] * (len_prefix + len_text + 1) + [0]
__UpperCAmelCase : List[Any] = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
__UpperCAmelCase : Union[str, Any] = tokenizer(prefix_text + input_text ).token_type_ids
__UpperCAmelCase : Optional[Any] = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
__UpperCAmelCase : Tuple = tokenizer(__lowercase , prefix_text=__lowercase ).token_type_ids
self.assertListEqual(__lowercase , __lowercase )
self.assertListEqual(__lowercase , __lowercase )
self.assertListEqual(__lowercase , __lowercase )
@slow
def UpperCAmelCase ( self : List[str] ) -> int:
__UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
__UpperCAmelCase : Optional[int] = tokenizer.encode("""あンいワ""" )
__UpperCAmelCase : Tuple = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
__UpperCAmelCase : Optional[int] = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
self.assertEqual(tokenizer.decode(__lowercase ) , tokenizer.decode(__lowercase ) )
self.assertEqual(tokenizer.decode(__lowercase ) , tokenizer.decode(__lowercase ) )
self.assertNotEqual(__lowercase , __lowercase )
self.assertNotEqual(__lowercase , __lowercase )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def UpperCAmelCase ( self : List[Any] ) -> List[str]:
__UpperCAmelCase : Any = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
__UpperCAmelCase : List[Any] = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
__UpperCAmelCase : int = tokenizer(__lowercase , padding=__lowercase )
__UpperCAmelCase : Optional[Any] = tokenizer.batch_encode_plus(__lowercase , padding=__lowercase )
# fmt: off
__UpperCAmelCase : Optional[int] = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
__UpperCAmelCase : Tuple = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
__UpperCAmelCase : Union[str, Any] = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , __lowercase )
self.assertListEqual(x_token.token_type_ids , __lowercase )
self.assertListEqual(x_token.attention_mask , __lowercase )
self.assertListEqual(x_token_a.input_ids , __lowercase )
self.assertListEqual(x_token_a.token_type_ids , __lowercase )
self.assertListEqual(x_token_a.attention_mask , __lowercase )
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def UpperCAmelCase ( self : Any ) -> int:
# tokenizer has no padding token
pass
| 63 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
"tokenization_mvp": ["MvpTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[int] = ["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
        "MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MvpForCausalLM",
        "MvpForConditionalGeneration",
        "MvpForQuestionAnswering",
        "MvpForSequenceClassification",
        "MvpModel",
        "MvpPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
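# Note (added): _LazyModule defers the heavy torch import until an attribute is
# first touched, e.g.
#     from transformers.models.mvp import MvpConfig  # cheap, config only
#     from transformers.models.mvp import MvpModel   # first access pulls in torch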
| 63 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
a : Dict = logging.get_logger(__name__)
@dataclass
class PyTorchBenchmarkArguments( BenchmarkArguments ):
    """simple docstring"""
    deprecated_args = [
        'no_inference',
        'no_cuda',
        'no_tpu',
        'no_speed',
        'no_memory',
        'no_env_print',
        'no_multi_process',
    ]
    def __init__( self , **kwargs ):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self , positive_arg , not kwargs.pop(deprecated_arg ) )
                logger.warning(
                    f"""{deprecated_arg} is deprecated. Please use --no_{positive_arg} or"""
                    f""" {positive_arg}={kwargs[positive_arg]}""" )
        self.torchscript = kwargs.pop("""torchscript""" , self.torchscript )
        self.torch_xla_tpu_print_metrics = kwargs.pop("""torch_xla_tpu_print_metrics""" , self.torch_xla_tpu_print_metrics )
        self.fp16_opt_level = kwargs.pop("""fp16_opt_level""" , self.fp16_opt_level )
        super().__init__(**kwargs )
    torchscript : bool = field(default=False , metadata={'help': 'Trace the models using torchscript'} )
    torch_xla_tpu_print_metrics : bool = field(default=False , metadata={'help': 'Print Xla/PyTorch tpu metrics'} )
    fp16_opt_level : str = field(
        default='O1' , metadata={
            'help': (
                'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '
                'See details at https://nvidia.github.io/apex/amp.html'
            )
        } , )
    @cached_property
    def _setup_devices( self ) -> Tuple["torch.device", int]:
        requires_backends(self , ["""torch"""] )
        logger.info("""PyTorch: setting up devices""" )
        if not self.cuda:
            device = torch.device("""cpu""" )
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
            n_gpu = torch.cuda.device_count()
        return device, n_gpu
    @property
    def is_tpu( self ) -> bool:
        return is_torch_tpu_available() and self.tpu
    @property
    def device_idx( self ) -> int:
        requires_backends(self , ["""torch"""] )
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()
    @property
    def device( self ) -> "torch.device":
        requires_backends(self , ["""torch"""] )
        return self._setup_devices[0]
    @property
    def n_gpu( self ) -> int:
        requires_backends(self , ["""torch"""] )
        return self._setup_devices[1]
    @property
    def is_gpu( self ) -> bool:
        return self.n_gpu > 0
| 63 | 1 |
from __future__ import annotations
import numpy as np
def relu(vector : list[float] ):
    return np.maximum(0 , vector )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 63 |
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def lowerCamelCase__ ( __lowerCamelCase : Tuple , __lowerCamelCase : Dict ):
assert isinstance(__lowerCamelCase , __lowerCamelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict ):
__UpperCAmelCase : Union[str, Any] = tmp_path / """cache"""
__UpperCAmelCase : Optional[int] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCAmelCase : str = SqlDatasetReader(
"""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=__lowerCamelCase , keep_in_memory=__lowerCamelCase ).read()
_check_sql_dataset(__lowerCamelCase , __lowerCamelCase )
@require_sqlalchemy
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def lowerCamelCase__ ( __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : Any ):
__UpperCAmelCase : Union[str, Any] = tmp_path / """cache"""
__UpperCAmelCase : List[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
__UpperCAmelCase : Any = features.copy() if features else default_expected_features
__UpperCAmelCase : Union[str, Any] = (
Features({feature: Value(__lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase : List[str] = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , features=__lowerCamelCase , cache_dir=__lowerCamelCase ).read()
_check_sql_dataset(__lowerCamelCase , __lowerCamelCase )
def iter_sql_file(sqlite_path ):
    with contextlib.closing(sqlite3.connect(sqlite_path ) ) as con:
        cur = con.cursor()
        cur.execute("""SELECT * FROM dataset""" )
        for row in cur:
            yield row
@require_sqlalchemy
def lowerCamelCase__ ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : int ):
__UpperCAmelCase : Optional[int] = tmp_path / """cache"""
__UpperCAmelCase : str = os.path.join(__lowerCamelCase , """tmp.sql""" )
__UpperCAmelCase : List[str] = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=__lowerCamelCase ).read()
SqlDatasetWriter(__lowerCamelCase , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=1 ).write()
__UpperCAmelCase : Optional[int] = iter_sql_file(__lowerCamelCase )
__UpperCAmelCase : Dict = iter_sql_file(__lowerCamelCase )
for rowa, rowa in zip(__lowerCamelCase , __lowerCamelCase ):
assert rowa == rowa
@require_sqlalchemy
def lowerCamelCase__ ( __lowerCamelCase : Any , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] ):
__UpperCAmelCase : int = tmp_path / """cache"""
__UpperCAmelCase : int = os.path.join(__lowerCamelCase , """tmp.sql""" )
__UpperCAmelCase : Any = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=__lowerCamelCase ).read()
SqlDatasetWriter(__lowerCamelCase , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=2 ).write()
__UpperCAmelCase : Union[str, Any] = iter_sql_file(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = iter_sql_file(__lowerCamelCase )
for rowa, rowa in zip(__lowerCamelCase , __lowerCamelCase ):
assert rowa == rowa
@require_sqlalchemy
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] ):
__UpperCAmelCase : Union[str, Any] = tmp_path / """cache"""
__UpperCAmelCase : Optional[int] = os.path.join(__lowerCamelCase , """tmp.sql""" )
__UpperCAmelCase : Optional[int] = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=__lowerCamelCase ).read()
with pytest.raises(__lowerCamelCase ):
SqlDatasetWriter(__lowerCamelCase , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=0 ).write()
| 63 | 1 |
def join(separator : str , separated : list[str] ) -> str:
    joined = """"""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase , str ):
            raise Exception("""join() accepts only strings to be joined""" )
        joined += word_or_phrase + separator
    return joined.strip(separator )
if __name__ == "__main__":
from doctest import testmod
testmod()
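# Illustrative calls (added): the separator comes first, e.g.
#     join("-", ["a", "b", "c"])            -> "a-b-c"
#     join(" ", ["You", "are", "amazing!"]) -> "You are amazing!"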
| 63 |
from __future__ import annotations
sieve = [True] * 1_000_001
i = 2
while i * i <= 1_000_000:
    if sieve[i]:
        for j in range(i * i, 1_000_001, i):
            sieve[j] = False
    i += 1
def is_prime(n : int ):
    return sieve[n]
def contains_an_even_digit(n : int ):
    return any(digit in """02468""" for digit in str(n ) )
def find_circular_primes(limit : int = 1000000 ):
    result = [2] # result already includes the number 2.
    for num in range(3 , limit + 1 , 2 ):
        if is_prime(num ) and not contains_an_even_digit(num ):
            str_num = str(num )
            list_nums = [int(str_num[j:] + str_num[:j] ) for j in range(len(str_num ) )]
            if all(is_prime(i ) for i in list_nums ):
                result.append(num )
    return result
def solution():
    return len(find_circular_primes() )
if __name__ == "__main__":
print(f"""{len(find_circular_primes()) = }""")
| 63 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : List[Any] = logging.get_logger(__name__)
a : List[str] = {
"sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class a ( lowercase__ ):
"""simple docstring"""
a : Tuple = 'vit_msn'
def __init__( self : Any , __lowercase : Any=768 , __lowercase : Union[str, Any]=12 , __lowercase : Any=12 , __lowercase : int=3072 , __lowercase : Optional[Any]="gelu" , __lowercase : Optional[int]=0.0 , __lowercase : Union[str, Any]=0.0 , __lowercase : Union[str, Any]=0.02 , __lowercase : Tuple=1e-0_6 , __lowercase : Dict=224 , __lowercase : List[str]=16 , __lowercase : List[str]=3 , __lowercase : Dict=True , **__lowercase : Tuple , ) -> Dict:
super().__init__(**__lowercase )
__UpperCAmelCase : Optional[Any] = hidden_size
__UpperCAmelCase : str = num_hidden_layers
__UpperCAmelCase : List[Any] = num_attention_heads
__UpperCAmelCase : Union[str, Any] = intermediate_size
__UpperCAmelCase : Union[str, Any] = hidden_act
__UpperCAmelCase : str = hidden_dropout_prob
__UpperCAmelCase : Tuple = attention_probs_dropout_prob
__UpperCAmelCase : Optional[Any] = initializer_range
__UpperCAmelCase : List[Any] = layer_norm_eps
__UpperCAmelCase : List[Any] = image_size
__UpperCAmelCase : Optional[int] = patch_size
__UpperCAmelCase : Optional[Any] = num_channels
__UpperCAmelCase : Optional[int] = qkv_bias
| 63 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
a : Dict = logging.get_logger(__name__)
PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]
def rename_state_dict_key(k ):
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name , hf_name )
    if k.startswith("""encoder""" ):
        k = k.replace(""".attn""" , """.self_attn""" )
        k = k.replace("""norm1""" , """self_attn_layer_norm""" )
        k = k.replace("""norm2""" , """final_layer_norm""" )
    elif k.startswith("""decoder""" ):
        k = k.replace("""norm1""" , """self_attn_layer_norm""" )
        k = k.replace("""norm2""" , """encoder_attn_layer_norm""" )
        k = k.replace("""norm3""" , """final_layer_norm""" )
    return k
def rename_layernorm_keys(sd ):
    keys = [
        """model.encoder.layernorm_embedding.weight""",
        """model.encoder.layernorm_embedding.bias""",
        """model.decoder.layernorm_embedding.weight""",
        """model.decoder.layernorm_embedding.bias""",
    ]
    for k in keys:
        v = sd.pop(k )
        new_k = k.replace("""layernorm_embedding""" , """layer_norm""" )
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path , pytorch_dump_folder_path , config_json_path ):
    model = torch.load(checkpoint_path , map_location="""cpu""" )
    sd = model["""model"""]
    cfg = BlenderbotConfig.from_json_file(config_json_path )
    m = BlenderbotForConditionalGeneration(cfg )
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k )
        if new_k not in valid_keys:
            failures.append([k, new_k] )
        else:
            mapping[new_k] = v
    if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd )
    m.model.load_state_dict(mapping , strict=True )
    m.half()
    m.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
a : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
a : Any = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 63 | 1 |
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
a : int = 16
a : Dict = 32
def lowerCamelCase__ ( __lowerCamelCase : Accelerator , __lowerCamelCase : int = 16 ):
__UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained("""bert-base-cased""" )
__UpperCAmelCase : str = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(__lowerCamelCase : List[str] ):
# max_length=None => use the model max length (it's actually the default)
__UpperCAmelCase : Tuple = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__lowerCamelCase , max_length=__lowerCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__UpperCAmelCase : str = datasets.map(
__lowerCamelCase , batched=__lowerCamelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__UpperCAmelCase : Optional[Any] = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(__lowerCamelCase : str ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__UpperCAmelCase : Optional[Any] = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__UpperCAmelCase : Dict = 16
elif accelerator.mixed_precision != "no":
__UpperCAmelCase : str = 8
else:
__UpperCAmelCase : Optional[Any] = None
return tokenizer.pad(
__lowerCamelCase , padding="""longest""" , max_length=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_tensors="""pt""" , )
# Instantiate dataloaders.
__UpperCAmelCase : List[Any] = DataLoader(
tokenized_datasets["""train"""] , shuffle=__lowerCamelCase , collate_fn=__lowerCamelCase , batch_size=__lowerCamelCase )
__UpperCAmelCase : Optional[int] = DataLoader(
tokenized_datasets["""validation"""] , shuffle=__lowerCamelCase , collate_fn=__lowerCamelCase , batch_size=__lowerCamelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
a : List[Any] = mocked_dataloaders # noqa: F811
def lowerCamelCase__ ( __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] ):
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , __lowerCamelCase ) == "1":
__UpperCAmelCase : Union[str, Any] = 2
# Initialize accelerator
__UpperCAmelCase : List[Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__UpperCAmelCase : Union[str, Any] = config["""lr"""]
__UpperCAmelCase : Dict = int(config["""num_epochs"""] )
__UpperCAmelCase : Tuple = int(config["""seed"""] )
__UpperCAmelCase : Any = int(config["""batch_size"""] )
__UpperCAmelCase : List[Any] = evaluate.load("""glue""" , """mrpc""" )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=__lowerCamelCase )
def inner_training_loop(__lowerCamelCase : str ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(__lowerCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__UpperCAmelCase : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=__lowerCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__UpperCAmelCase : Optional[int] = model.to(accelerator.device )
# Instantiate optimizer
__UpperCAmelCase : Any = AdamW(params=model.parameters() , lr=__lowerCamelCase )
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = get_dataloaders(__lowerCamelCase , __lowerCamelCase )
# Instantiate scheduler
__UpperCAmelCase : str = get_linear_schedule_with_warmup(
optimizer=__lowerCamelCase , num_warmup_steps=100 , num_training_steps=(len(__lowerCamelCase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = accelerator.prepare(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Now we train the model
for epoch in range(__lowerCamelCase ):
model.train()
for step, batch in enumerate(__lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
__UpperCAmelCase : Dict = model(**__lowerCamelCase )
__UpperCAmelCase : int = outputs.loss
accelerator.backward(__lowerCamelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__UpperCAmelCase : List[Any] = model(**__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = outputs.logits.argmax(dim=-1 )
__UpperCAmelCase , __UpperCAmelCase : Tuple = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=__lowerCamelCase , references=__lowerCamelCase , )
__UpperCAmelCase : Any = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , __lowerCamelCase )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def lowerCamelCase__ ( ):
__UpperCAmelCase : List[str] = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=__lowerCamelCase , default=__lowerCamelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
__UpperCAmelCase : List[str] = parser.parse_args()
__UpperCAmelCase : Tuple = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(__lowerCamelCase , __lowerCamelCase )
if __name__ == "__main__":
main()
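# Behavior sketch (added): find_executable_batch_size retries the decorated
# function on CUDA out-of-memory errors, halving `starting_batch_size` on each
# attempt (128 -> 64 -> 32 ...), which is why the dataloaders must be rebuilt
# inside inner_training_loop rather than outside it.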
| 63 |
def interpolation_search(sorted_collection , item ):
    left = 0
    right = len(sorted_collection ) - 1
    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection ):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
def interpolation_search_by_recursion(sorted_collection , item , left , right ):
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(sorted_collection ):
        return None
    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection , item , point , left )
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection , item , right , left )
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection , item , left , point - 1 )
        else:
            return interpolation_search_by_recursion(
                sorted_collection , item , point + 1 , right )
def __assert_sorted(collection ):
    if collection != sorted(collection ):
        raise ValueError("""Collection must be sorted in ascending order""" )
    return True
if __name__ == "__main__":
    import sys
    debug = 0
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be sorted in ascending order to apply interpolation search")
    target = 67
    result = interpolation_search(collection, target)
if result is not None:
print(f"""{target} found at positions: {result}""")
else:
print("Not found")
| 63 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 63 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 63 | 1 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
a : int = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check(pkg , hint=None ):
    require_version(deps[pkg] , hint )
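# Illustration (added): dep_version_check("tqdm") asserts that the installed
# tqdm satisfies the version range pinned in `deps`, raising otherwise.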
| 63 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class a ( lowercase__ , lowercase__ ):
"""simple docstring"""
a : Dict = 1
@register_to_config
def __init__( self : int , __lowercase : int = 1000 , __lowercase : Optional[Union[np.ndarray, List[float]]] = None ) -> Union[str, Any]:
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(__lowercase )
# standard deviation of the initial noise distribution
__UpperCAmelCase : List[Any] = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
__UpperCAmelCase : List[Any] = 4
# running values
__UpperCAmelCase : str = []
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []
    def step(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, return_dict: bool = True) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample
    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample
def __len__( self : Tuple ) -> str:
return self.config.num_train_timesteps
| 63 | 1 |
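# A standalone sketch of the linear multistep combination used by step() above;
# the (55, -59, 37, -9)/24 weights are the classic 4th-order Adams-Bashforth
# coefficients, here applied to plain floats instead of tensors.
def multistep_estimate(ets):
    # Mirrors the 1/2/3/4-history branches of the scheduler's step() method.
    if len(ets) == 1:
        return ets[-1]
    if len(ets) == 2:
        return (3 * ets[-1] - ets[-2]) / 2
    if len(ets) == 3:
        return (23 * ets[-1] - 16 * ets[-2] + 5 * ets[-3]) / 12
    return (55 * ets[-1] - 59 * ets[-2] + 37 * ets[-3] - 9 * ets[-4]) / 24


print(multistep_estimate([0.1, 0.2, 0.3, 0.4]))  # exercises the 4-step branch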
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
a : Tuple = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random())  # Introduce some randomness
            if scheduler is not None:
                scheduler.step()
    return rands
class DummyModel(nn.Module):
    """simple docstring"""

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self : List[str] ) -> Tuple:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCAmelCase : List[Any] = DummyModel()
__UpperCAmelCase : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__UpperCAmelCase , __UpperCAmelCase : str = dummy_dataloaders()
__UpperCAmelCase : int = ProjectConfiguration(total_limit=1 , project_dir=__lowercase , automatic_checkpoint_naming=__lowercase )
# Train baseline
__UpperCAmelCase : Optional[int] = Accelerator(project_config=__lowercase )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = accelerator.prepare(
__lowercase , __lowercase , __lowercase , __lowercase )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def UpperCAmelCase ( self : List[Any] ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCAmelCase : List[str] = DummyModel()
__UpperCAmelCase : int = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__UpperCAmelCase , __UpperCAmelCase : Dict = dummy_dataloaders()
# Train baseline
__UpperCAmelCase : int = Accelerator()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = accelerator.prepare(
__lowercase , __lowercase , __lowercase , __lowercase )
# Save initial
__UpperCAmelCase : Any = os.path.join(__lowercase , """initial""" )
accelerator.save_state(__lowercase )
((__UpperCAmelCase) , (__UpperCAmelCase)) : List[Any] = model.a.item(), model.b.item()
__UpperCAmelCase : int = optimizer.state_dict()
__UpperCAmelCase : str = train(3 , __lowercase , __lowercase , __lowercase , __lowercase )
((__UpperCAmelCase) , (__UpperCAmelCase)) : int = model.a.item(), model.b.item()
__UpperCAmelCase : Tuple = optimizer.state_dict()
# Train partially
set_seed(42 )
__UpperCAmelCase : int = DummyModel()
__UpperCAmelCase : Tuple = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = dummy_dataloaders()
__UpperCAmelCase : Optional[Any] = Accelerator()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = accelerator.prepare(
__lowercase , __lowercase , __lowercase , __lowercase )
accelerator.load_state(__lowercase )
((__UpperCAmelCase) , (__UpperCAmelCase)) : List[Any] = model.a.item(), model.b.item()
__UpperCAmelCase : Tuple = optimizer.state_dict()
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
__UpperCAmelCase : Union[str, Any] = train(2 , __lowercase , __lowercase , __lowercase , __lowercase )
# Save everything
__UpperCAmelCase : List[Any] = os.path.join(__lowercase , """checkpoint""" )
accelerator.save_state(__lowercase )
# Load everything back in and make sure all states work
accelerator.load_state(__lowercase )
test_rands += train(1 , __lowercase , __lowercase , __lowercase , __lowercase )
((__UpperCAmelCase) , (__UpperCAmelCase)) : Optional[int] = model.a.item(), model.b.item()
__UpperCAmelCase : str = optimizer.state_dict()
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
def UpperCAmelCase ( self : int ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCAmelCase : Any = DummyModel()
__UpperCAmelCase : Dict = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = dummy_dataloaders()
__UpperCAmelCase : str = ProjectConfiguration(automatic_checkpoint_naming=__lowercase )
# Train baseline
__UpperCAmelCase : Dict = Accelerator(project_dir=__lowercase , project_config=__lowercase )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = accelerator.prepare(
__lowercase , __lowercase , __lowercase , __lowercase )
# Save initial
accelerator.save_state()
((__UpperCAmelCase) , (__UpperCAmelCase)) : List[str] = model.a.item(), model.b.item()
__UpperCAmelCase : Dict = optimizer.state_dict()
__UpperCAmelCase : Tuple = train(3 , __lowercase , __lowercase , __lowercase , __lowercase )
((__UpperCAmelCase) , (__UpperCAmelCase)) : int = model.a.item(), model.b.item()
__UpperCAmelCase : str = optimizer.state_dict()
# Train partially
set_seed(42 )
__UpperCAmelCase : List[Any] = DummyModel()
__UpperCAmelCase : List[str] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = dummy_dataloaders()
__UpperCAmelCase : Tuple = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=__lowercase )
__UpperCAmelCase : Any = Accelerator(project_dir=__lowercase , project_config=__lowercase )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = accelerator.prepare(
__lowercase , __lowercase , __lowercase , __lowercase )
accelerator.load_state(os.path.join(__lowercase , """checkpoints""" , """checkpoint_0""" ) )
((__UpperCAmelCase) , (__UpperCAmelCase)) : Tuple = model.a.item(), model.b.item()
__UpperCAmelCase : Optional[Any] = optimizer.state_dict()
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
__UpperCAmelCase : Dict = train(2 , __lowercase , __lowercase , __lowercase , __lowercase )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(__lowercase , """checkpoints""" , """checkpoint_1""" ) )
test_rands += train(1 , __lowercase , __lowercase , __lowercase , __lowercase )
((__UpperCAmelCase) , (__UpperCAmelCase)) : List[str] = model.a.item(), model.b.item()
__UpperCAmelCase : Optional[int] = optimizer.state_dict()
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
__UpperCAmelCase : Tuple = torch.tensor([1, 2, 3] )
__UpperCAmelCase : Dict = torch.tensor([2, 3, 4] )
__UpperCAmelCase : Union[str, Any] = DummyModel()
__UpperCAmelCase : str = torch.optim.Adam(net.parameters() )
__UpperCAmelCase : str = Accelerator()
with self.assertRaises(__lowercase ) as ve:
accelerator.register_for_checkpointing(__lowercase , __lowercase , __lowercase , __lowercase )
__UpperCAmelCase : Optional[int] = str(ve.exception )
self.assertTrue("""Item at index 0""" in message )
self.assertTrue("""Item at index 1""" in message )
self.assertFalse("""Item at index 2""" in message )
self.assertFalse("""Item at index 3""" in message )
def UpperCAmelCase ( self : Dict ) -> Any:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCAmelCase : int = DummyModel()
__UpperCAmelCase : int = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__UpperCAmelCase : Dict = torch.optim.lr_scheduler.StepLR(__lowercase , step_size=1 , gamma=0.99 )
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = dummy_dataloaders()
__UpperCAmelCase : List[str] = ProjectConfiguration(automatic_checkpoint_naming=__lowercase )
# Train baseline
__UpperCAmelCase : Optional[Any] = Accelerator(project_dir=__lowercase , project_config=__lowercase )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = accelerator.prepare(
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
# Save initial
accelerator.save_state()
__UpperCAmelCase : Optional[int] = scheduler.state_dict()
train(3 , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
self.assertNotEqual(__lowercase , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(__lowercase , """checkpoints""" , """checkpoint_0""" ) )
self.assertEqual(__lowercase , scheduler.state_dict() )
def UpperCAmelCase ( self : Any ) -> List[Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCAmelCase : Optional[int] = DummyModel()
__UpperCAmelCase : Optional[int] = ProjectConfiguration(automatic_checkpoint_naming=__lowercase , total_limit=2 )
# Train baseline
__UpperCAmelCase : Dict = Accelerator(project_dir=__lowercase , project_config=__lowercase )
__UpperCAmelCase : int = accelerator.prepare(__lowercase )
# Save 3 states:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(__lowercase , """checkpoints""" , """checkpoint_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(__lowercase , """checkpoints""" , """checkpoint_9""" ) ) )
self.assertTrue(os.path.exists(os.path.join(__lowercase , """checkpoints""" , """checkpoint_10""" ) ) )
@require_cuda
def UpperCAmelCase ( self : Dict ) -> Optional[Any]:
__UpperCAmelCase : List[str] = ["""torchrun""", f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(__lowercase , env=os.environ.copy() )
if __name__ == "__main__":
a : Union[str, Any] = "/tmp/accelerate/state_checkpointing"
a : Any = DummyModel()
a : str = torch.optim.Adam(params=model.parameters(), lr=1e-3)
a : List[str] = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
a ,a : List[Any] = dummy_dataloaders()
a : List[str] = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
a : Dict = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
a ,a ,a ,a ,a : Dict = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
a ,a : str = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
a : Union[str, Any] = group["params"][0].device
break
assert param_device.type == accelerator.device.type
    model = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
for group in optimizer.param_groups:
a : Union[str, Any] = group["params"][0].device
break
assert (
param_device.type == torch.device("cpu").type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
for group in optimizer.param_groups:
a : Dict = group["params"][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 63 |
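# A minimal sketch of the save/load round-trip the tests above exercise; the
# model, optimizer and checkpoint path are placeholders, and only Accelerator
# calls that appear in the tests (prepare, save_state, load_state) are used.
import torch

from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(1, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
model, optimizer = accelerator.prepare(model, optimizer)

accelerator.save_state("ckpt")  # writes model/optimizer/RNG state under ./ckpt
# ... train for a while ...
accelerator.load_state("ckpt")  # restores exactly what save_state wrote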
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
| 63 | 1 |
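# A self-contained sketch of the register_subcommand pattern main() relies on:
# each command registers its own sub-parser and sets `func` to a factory, so
# the dispatcher stays generic. All names here are illustrative stand-ins.
from argparse import ArgumentParser


class HelloCommand:
    @staticmethod
    def register_subcommand(subparsers):
        hello_parser = subparsers.add_parser("hello", help="Print a greeting.")
        hello_parser.add_argument("--name", default="world")
        hello_parser.set_defaults(func=lambda args: HelloCommand(args))

    def __init__(self, args):
        self.name = args.name

    def run(self):
        print(f"hello, {self.name}")


parser = ArgumentParser("demo-cli", usage="demo-cli <command> [<args>]")
HelloCommand.register_subcommand(parser.add_subparsers())
args = parser.parse_args(["hello", "--name", "CLI"])
args.func(args).run()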
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)
logging.set_verbosity_info()
a : List[str] = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
)
parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 63 |
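# A tiny sketch of the weight-copy pattern the converters above rely on: raw
# tensors from a checkpoint dict are written onto matching module parameters.
# The layer and key names here are invented for illustration.
import torch

model = torch.nn.Linear(4, 2)
checkpoint = {"linear.weight": torch.randn(2, 4), "linear.bias": torch.randn(2)}

with torch.no_grad():
    model.weight.copy_(checkpoint["linear.weight"])
    model.bias.copy_(checkpoint["linear.bias"])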
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a : List[str] = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Dict = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[Any] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 63 | 1 |
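# A minimal PEP 562 sketch of the lazy-import idea behind _LazyModule: module
# attributes resolve their submodule on first access instead of at import
# time. This illustrates the mechanism only (inside a package __init__), not
# the transformers class itself.
import importlib

_import_structure = {"configuration_xglm": ["XGLMConfig"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}


def __getattr__(name):  # module-level __getattr__, Python 3.7+
    if name in _attr_to_module:
        module = importlib.import_module("." + _attr_to_module[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")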
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
a : Dict = False
a : str = False
def train_command_factory(args: Namespace):
    return TrainCommand(args)
class TrainCommand(BaseTransformersCLICommand):
    """simple docstring"""
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data",
            type=str,
            required=True,
            help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
        )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
        )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
        )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
        )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split",
            type=float,
            default=0.1,
            help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on."
        )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
        )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)
    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")

        self.framework = "tf" if is_tf_available() else "torch"

        os.makedirs(args.output, exist_ok=True)
        self.output = args.output

        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id

        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError

        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )

        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon
    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )

        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
| 63 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Any = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 | 1 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def get_artifacts_links(workflow_run_id, token=None):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result
def get_all_errors(artifact_dir, job_links=None):
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))

    return errors
def reduce_by_error(logs, error_filter=None):
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def get_model(test):
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None

    return test
def reduce_by_model(logs, error_filter=None):
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)

    return "\n".join(lines)
def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)

    return "\n".join(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
| 63 |
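# A compact sketch of the Counter-based aggregation the report script above
# performs: (error, model) pairs are grouped per model and ranked by
# frequency. The sample data is made up for illustration.
from collections import Counter

sample_logs = [
    ("AssertionError", "bert"),
    ("OSError", "bert"),
    ("AssertionError", "gpt2"),
    ("AssertionError", "bert"),
]
per_model = {}
for error, model in sample_logs:
    per_model.setdefault(model, Counter())[error] += 1
for model, counts in sorted(per_model.items(), key=lambda kv: -sum(kv[1].values())):
    top_error, top_count = counts.most_common(1)[0]
    print(f"| {model} | {sum(counts.values())} | {top_error} | {top_count} |")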
def prime_sieve_eratosthenes(num):
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
a : Any = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
| 63 | 1 |
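# A quick sanity check of the sieve above; the algorithm runs in
# O(n log log n) time and the boolean array costs O(n) memory.
assert prime_sieve_eratosthenes(10) == [2, 3, 5, 7]
assert prime_sieve_eratosthenes(2) == [2]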
import numpy as np
def sigmoid(vector: np.array) -> np.array:
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.array) -> np.array:
    return vector * sigmoid(1.702 * vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 63 |
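# The 1.702 constant above makes x * sigmoid(1.702 * x) a close approximation
# of GELU; this quick check compares it against the exact erf-based form.
import numpy as np
from math import erf, sqrt

x = np.linspace(-3, 3, 7)
approx = x / (1 + np.exp(-1.702 * x))
exact = np.array([0.5 * v * (1 + erf(v / sqrt(2))) for v in x])
print(np.max(np.abs(approx - exact)))  # roughly 2e-2 at worst on this range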
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
a : Union[str, Any] = {
"microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class GitVisionConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "git_vision_model"
    def __init__(self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=224, patch_size=16, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
@classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class GitConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "git"
    def __init__(self, vision_config=None, vocab_size=30522, hidden_size=768, num_hidden_layers=6, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, tie_word_embeddings=False, bos_token_id=101, eos_token_id=102, num_image_with_embedding=None, **kwargs):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 63 | 1 |
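# A minimal sketch of the nested-config pattern GitConfig uses above: the
# parent config serializes its vision sub-config back into a plain dict in
# to_dict(). The class names and fields here are stand-ins for illustration.
import copy


class DemoVisionConfig:
    def __init__(self, hidden_size=768):
        self.hidden_size = hidden_size

    def to_dict(self):
        return copy.deepcopy(self.__dict__)


class DemoConfig:
    model_type = "demo"

    def __init__(self, vision_config=None):
        self.vision_config = DemoVisionConfig(**(vision_config or {}))

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


print(DemoConfig({"hidden_size": 32}).to_dict())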
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    """simple docstring"""

    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])
    @property
    def mean(self):
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        return self.variance.sqrt()
class ParameterProjection(nn.Module):
    """simple docstring"""

    def __init__(self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)
class LambdaLayer(nn.Module):
    """simple docstring"""

    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)
class DistributionOutput:
    """simple docstring"""

    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args) -> Distribution:
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)
    def distribution(self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)
    @property
    def event_shape(self) -> Tuple:
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        return 0.0
    def get_parameter_projection(self, in_features: int) -> nn.Module:
        return ParameterProjection(
            in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map)
        )

    def domain_map(self, *args: torch.Tensor):
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
class StudentTOutput(DistributionOutput):
    """simple docstring"""

    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class NormalOutput(DistributionOutput):
    """simple docstring"""

    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)
class NegativeBinomialOutput(DistributionOutput):
    """simple docstring"""

    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None) -> Distribution:
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
| 63 |
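# A short sketch of the loc/scale wrapping that AffineTransformed performs
# above, using torch.distributions directly; the numbers are arbitrary.
import torch
from torch.distributions import AffineTransform, Normal, TransformedDistribution

base = Normal(torch.zeros(3), torch.ones(3))
scaled = TransformedDistribution(base, [AffineTransform(loc=2.0, scale=0.5)])
sample = scaled.sample()
print(sample, scaled.log_prob(sample))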
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self) -> None:
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)
@require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
def UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
# fmt: off
__UpperCAmelCase : str = {"""input_ids""": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
__UpperCAmelCase : int = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=__lowercase , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=__lowercase , )
| 63 | 1 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
a : Dict = TypeVar("T")
def get_parent_position(position: int) -> int:
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    return (2 * position) + 2
class MinPriorityQueue(Generic[T]):
    """simple docstring"""
def __init__( self : List[Any] ) -> None:
__UpperCAmelCase : list[tuple[T, int]] = []
__UpperCAmelCase : dict[T, int] = {}
__UpperCAmelCase : int = 0
def __len__( self : Optional[int] ) -> int:
return self.elements
def __repr__( self : str ) -> str:
return str(self.heap )
def UpperCAmelCase ( self : Dict ) -> bool:
# Check if the priority queue is empty
return self.elements == 0
def UpperCAmelCase ( self : Union[str, Any] , __lowercase : T , __lowercase : int ) -> None:
# Add an element with given priority to the queue
self.heap.append((elem, weight) )
__UpperCAmelCase : Any = self.elements
self.elements += 1
self._bubble_up(__lowercase )
def UpperCAmelCase ( self : Tuple ) -> T:
# Remove and return the element with lowest weight (highest priority)
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
__UpperCAmelCase , __UpperCAmelCase : Dict = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
__UpperCAmelCase , __UpperCAmelCase : List[str] = self.heap[0]
self._bubble_down(__lowercase )
return elem
def UpperCAmelCase ( self : str , __lowercase : T , __lowercase : int ) -> None:
# Update the weight of the given key
__UpperCAmelCase : Optional[int] = self.position_map[elem]
__UpperCAmelCase : Union[str, Any] = (elem, weight)
if position > 0:
__UpperCAmelCase : Dict = get_parent_position(__lowercase )
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(__lowercase )
else:
self._bubble_down(__lowercase )
else:
self._bubble_down(__lowercase )
def UpperCAmelCase ( self : Union[str, Any] , __lowercase : T ) -> None:
# Place a node at the proper position (upward movement) [to be used internally
# only]
__UpperCAmelCase : List[str] = self.position_map[elem]
if curr_pos == 0:
return None
__UpperCAmelCase : Dict = get_parent_position(__lowercase )
__UpperCAmelCase , __UpperCAmelCase : Dict = self.heap[curr_pos]
__UpperCAmelCase , __UpperCAmelCase : Dict = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(__lowercase , __lowercase )
return self._bubble_up(__lowercase )
return None
def UpperCAmelCase ( self : str , __lowercase : T ) -> None:
# Place a node at the proper position (downward movement) [to be used
# internally only]
__UpperCAmelCase : Optional[Any] = self.position_map[elem]
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.heap[curr_pos]
__UpperCAmelCase : Tuple = get_child_left_position(__lowercase )
__UpperCAmelCase : Tuple = get_child_right_position(__lowercase )
if child_left_position < self.elements and child_right_position < self.elements:
__UpperCAmelCase , __UpperCAmelCase : Tuple = self.heap[child_left_position]
__UpperCAmelCase , __UpperCAmelCase : List[Any] = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(__lowercase , __lowercase )
return self._bubble_down(__lowercase )
if child_left_position < self.elements:
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(__lowercase , __lowercase )
return self._bubble_down(__lowercase )
else:
return None
if child_right_position < self.elements:
__UpperCAmelCase , __UpperCAmelCase : Any = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(__lowercase , __lowercase )
return self._bubble_down(__lowercase )
return None
def UpperCAmelCase ( self : Any , __lowercase : int , __lowercase : int ) -> None:
# Swap the nodes at the given positions
__UpperCAmelCase : Union[str, Any] = self.heap[nodea_pos][0]
__UpperCAmelCase : Optional[int] = self.heap[nodea_pos][0]
__UpperCAmelCase , __UpperCAmelCase : List[Any] = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
__UpperCAmelCase : Union[str, Any] = nodea_pos
__UpperCAmelCase : List[str] = nodea_pos
class a ( Generic[T] ):
"""simple docstring"""
def __init__( self : Union[str, Any] ) -> None:
__UpperCAmelCase : dict[T, dict[T, int]] = {}
__UpperCAmelCase : int = 0
def __repr__( self : List[str] ) -> str:
return str(self.connections )
def __len__( self : Any ) -> int:
return self.nodes
def UpperCAmelCase ( self : Optional[Any] , __lowercase : T ) -> None:
# Add a node in the graph if it is not in the graph
if node not in self.connections:
__UpperCAmelCase : Optional[int] = {}
self.nodes += 1
def UpperCAmelCase ( self : List[str] , __lowercase : T , __lowercase : T , __lowercase : int ) -> None:
# Add an edge between 2 nodes in the graph
self.add_node(__lowercase )
self.add_node(__lowercase )
__UpperCAmelCase : Tuple = weight
__UpperCAmelCase : Union[str, Any] = weight
def lowerCamelCase__ ( __lowerCamelCase : GraphUndirectedWeighted[T] , ):
__UpperCAmelCase : dict[T, int] = {node: maxsize for node in graph.connections}
__UpperCAmelCase : dict[T, T | None] = {node: None for node in graph.connections}
__UpperCAmelCase : MinPriorityQueue[T] = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(__lowerCamelCase , __lowerCamelCase )
if priority_queue.is_empty():
return dist, parent
# initialization
__UpperCAmelCase : Any = priority_queue.extract_min()
__UpperCAmelCase : Dict = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
__UpperCAmelCase : Union[str, Any] = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(__lowerCamelCase , dist[neighbour] )
__UpperCAmelCase : List[Any] = node
# running prim's algorithm
while not priority_queue.is_empty():
__UpperCAmelCase : Optional[Any] = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
__UpperCAmelCase : Optional[Any] = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(__lowerCamelCase , dist[neighbour] )
__UpperCAmelCase : List[Any] = node
return dist, parent
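# Usage sketch (not part of the original module): build a small undirected weighted
# graph and run Prim's algorithm on it. `dist` holds each node's cheapest connection
# cost into the tree and `parent` holds the MST edges.
#
#     graph = GraphUndirectedWeighted[str]()
#     graph.add_edge("a", "b", 3)
#     graph.add_edge("b", "c", 10)
#     graph.add_edge("c", "a", 5)
#     dist, parent = prims_algo(graph)
#     print(parent)  # {'a': None, 'b': 'a', 'c': 'a'}  (edges a-b and a-c form the MST)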
| 63 |
from __future__ import annotations

import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    # A leaf has been reached: return its score.
    if depth == height:
        return scores[node_index]
    # The maximizer picks the larger child value, the minimizer the smaller one.
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
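# Worked example for the scores above (a sketch of the recursion, not part of the
# original file): with height = log2(8) = 3 the levels alternate max, min, max, leaves.
#   depth 2 (max over leaf pairs): max(90, 23)=90, max(6, 33)=33, max(21, 65)=65, max(123, 34423)=34423
#   depth 1 (min):                 min(90, 33)=33, min(65, 34423)=65
#   depth 0 (max):                 max(33, 65)=65
# so main() prints "Optimal value : 65".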
| 63 | 1 |
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
a : int = "\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n"
a : Optional[Any] = "\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n"
a : Any = "\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for 'record': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'prediction_text': the predicted answer text\n - for 'multirc': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question-answer pair as specified by the dataset\n - 'prediction': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for 'record': list of question-answers dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'answers': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for 'record':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1': F1 score\n - for 'multirc':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1_m': Per-question macro-F1 score\n - 'f1_a': Average F1 score over all answers\n - for 'axb':\n 'matthews_correlation': Matthew Correlation\n - for 'cb':\n - 'accuracy': Accuracy\n - 'f1': F1 score\n - for all others:\n - 'accuracy': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'record')\n >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]\n >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')\n >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }
def evaluate_multirc(ids_preds, labels):
    # Group the answer-level predictions by question, then compute a per-question
    # macro F1 and exact match, plus an F1 over all answers.
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]

    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None , )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"prediction_text": datasets.Value("""string""" ),
},
"references": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"answers": datasets.Sequence(datasets.Value("""string""" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("""int64""" ),
"paragraph": datasets.Value("""int64""" ),
"question": datasets.Value("""int64""" ),
},
"prediction": datasets.Value("""int64""" ),
},
"references": datasets.Value("""int64""" ),
}
else:
return {
"predictions": datasets.Value("""int64""" ),
"references": datasets.Value("""int64""" ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
| 63 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}


class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
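# Quick usage sketch (not part of the original file): `attribute_map` lets the
# generic PretrainedConfig names resolve to the GPT-specific ones.
#
#     config = OpenAIGPTConfig(n_embd=512, n_layer=6)
#     print(config.hidden_size)        # 512, aliased to n_embd
#     print(config.num_hidden_layers)  # 6, aliased to n_layer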
| 63 | 1 |
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
"iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
"iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
"iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
"mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
"mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
"mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
"mask_downscaling.0": "mask_embed.conv1",
"mask_downscaling.1": "mask_embed.layer_norm1",
"mask_downscaling.3": "mask_embed.conv2",
"mask_downscaling.4": "mask_embed.layer_norm2",
"mask_downscaling.6": "mask_embed.conv3",
"point_embeddings": "point_embed",
"pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
"image_encoder": "vision_encoder",
"neck.0": "neck.conv1",
"neck.1": "neck.layer_norm1",
"neck.2": "neck.conv2",
"neck.3": "neck.layer_norm2",
"patch_embed.proj": "patch_embed.projection",
".norm": ".layer_norm",
"blocks": "layers",
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)

    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        # First apply the plain string renames from the mapping table.
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        # Then remap the hypernetwork MLP layer indices onto proj_in / layers.0 / proj_out.
        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]

    return model_state_dict
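# Illustrative walk-through of replace_keys on a toy state dict (assumed-shape
# tensors, not a real SAM checkpoint):
#
#     toy = {
#         "image_encoder.patch_embed.proj.weight": torch.zeros(1),
#         "prompt_encoder.pe_layer.positional_encoding_gaussian_matrix": torch.zeros(1),
#         "mask_decoder.output_hypernetworks_mlps.0.layers.0.weight": torch.zeros(1),
#     }
#     print(sorted(replace_keys(toy)))
#     # ['mask_decoder.output_hypernetworks_mlps.0.proj_in.weight',
#     #  'prompt_encoder.shared_embedding.positional_embedding',
#     #  'shared_image_embedding.positional_embedding',
#     #  'vision_encoder.patch_embed.projection.weight']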
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024,
            num_hidden_layers=24,
            num_attention_heads=16,
            global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(vision_config=vision_config)
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280,
            num_hidden_layers=32,
            num_attention_heads=16,
            global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(vision_config=vision_config)

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9712603092193604

        input_boxes = ((75, 275, 1725, 850),)
        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8686015605926514

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
a : Tuple = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
"--model_name",
default="sam_vit_h_4b8939",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
parser.add_argument(
"--model_hub_id",
default="ybelkada/segment-anything",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
    args = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 63 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
# The class names below had their digits mangled in this file; alias the real
# diffusers classes to the names the rest of the file uses.
from diffusers import DDIMScheduler, VQModel
from diffusers import KandinskyV22InpaintPipeline as KandinskyVaaInpaintPipeline
from diffusers import KandinskyV22PriorPipeline as KandinskyVaaPriorPipeline
from diffusers import UNet2DConditionModel as UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyVaaInpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNetaDConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyVaaInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyVaaInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 63 | 1 |
from math import pow


def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum: int, power: int) -> int:
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
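# Example (a quick check, not in the original file): with power = 2 the squares
# available below 13 are 1, 4 and 9, and the only subset of distinct squares
# summing to 13 is 4 + 9, so:
#
#     print(solve(13, 2))  # 1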
| 63 |
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2 as cva  # OpenCV, aliased so the cva.* calls below keep working
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )
default_cache_path = os.path.join(torch_cache_home, "transformers")

CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())

    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs
def load_checkpoint(ckp_path):
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class Config:
    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)
        self._pointer = d
def __repr__( self : Optional[Any] ) -> Optional[int]:
return str(list((self._pointer.keys()) ) )
    def __setattr__(self, key, val):
        self.__dict__[key] = val
        self.__dict__[key.split(".")[-1]] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), ".".join(levels[i:]), val)
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]
    def to_dict(self):
        return self._pointer

    def dump_yaml(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            dump(data, stream)

    def dump_json(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            json.dump(data, stream)

    @staticmethod
    def load_yaml(config_name):
        with open(config_name) as stream:
            data = load(stream, Loader=Loader)
        return data
    def __str__(self):
        t = " "
        if self._name != "root":
            r = f"{t * (self._level - 1)}{self._name}:\n"
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f"{t * (self._level)}{v}\n"
                self._level += 1
            else:
                r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n"
            self._level = level
        return r[:-1]
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)
    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path, **kwargs):
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)

        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)

        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
            )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            config_file = Config.load_yaml(resolved_config_file)
        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg)

        if resolved_config_file == config_file:
            print("loading configuration file from path")
        else:
            print("loading configuration file cache")

        return Config.load_yaml(resolved_config_file), kwargs
def compare(in_tensor):
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if not x]) / len(n1.flatten()) * 100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")
# Hugging face functions below
def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")


def hf_bucket_url(model_id, filename, use_cdn=True):
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B",
        unit_scale=True,
        total=total,
        initial=resume_size,
        desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
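# Minimal usage sketch for http_get (illustrative; the URL is a placeholder):
# it streams the response into an open binary file and shows a tqdm progress bar.
#
#     with open("weights.bin", "wb") as f:
#         http_get("https://example.com/weights.bin", f)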
def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=10,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    os.makedirs(cache_dir, exist_ok=True)

    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass

    filename = url_to_filename(url, etag)

    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)

    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False."
                    )
                return None

    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path

    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path

        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0

        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s",
                url,
                temp_file.name,
            )

            http_get(
                url,
                temp_file,
                proxies=proxies,
                resume_size=resume_size,
                user_agent=user_agent,
            )

        os.replace(temp_file.name, cache_path)

        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)

    return cache_path
def url_to_filename(url, etag=None):
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()

    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()

    if url.endswith(".h5"):
        filename += ".h5"
    return filename
def cached_path(
    url_or_filename,
    cache_dir=None,
    force_download=False,
    proxies=None,
    resume_download=False,
    user_agent=None,
    extract_compressed_file=False,
    force_extract=False,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            user_agent=user_agent,
            local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))

    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path

        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)

        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted

        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))

        return output_path_extracted

    return output_path
def lowerCamelCase__ ( __lowerCamelCase : List[Any] , __lowerCamelCase : int="," ):
assert isinstance(__lowerCamelCase , __lowerCamelCase )
if os.path.isfile(__lowerCamelCase ):
with open(__lowerCamelCase ) as f:
__UpperCAmelCase : List[Any] = eval(f.read() )
else:
__UpperCAmelCase : List[str] = requests.get(__lowerCamelCase )
try:
__UpperCAmelCase : int = requests.json()
except Exception:
__UpperCAmelCase : List[Any] = req.content.decode()
assert data is not None, "could not connect"
try:
__UpperCAmelCase : str = eval(__lowerCamelCase )
except Exception:
__UpperCAmelCase : List[Any] = data.split("""\n""" )
req.close()
return data
def get_image_from_url(url):
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img


def load_frcnn_pkl_from_url(url):
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new


def get_demo_path():
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")


def img_tensorize(im, input_format="RGB"):
    assert isinstance(im, str)
    if os.path.isfile(im):
        img = cva.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, f"could not connect to: {im}"
    img = cva.cvtColor(img, cva.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img


def chunk(images, batch=1):
    return (images[i : i + batch] for i in range(0, len(images), batch))
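# Tiny usage sketch for chunk (not in the original file): it yields successive
# slices of at most `batch` items.
#
#     list(chunk([1, 2, 3, 4, 5], batch=2))  # [[1, 2], [3, 4], [5]]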
| 63 | 1 |
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
a : int = "\\n@inproceedings{lin-2004-rouge,\n title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n author = \"Lin, Chin-Yew\",\n booktitle = \"Text Summarization Branches Out\",\n month = jul,\n year = \"2004\",\n address = \"Barcelona, Spain\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W04-1013\",\n pages = \"74--81\",\n}\n"
a : int = "\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n"
a : Any = "\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n `\"rougeL\"`: Longest common subsequence based scoring.\n `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric('rouge')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']\n >>> print(results[\"rouge1\"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results[\"rouge1\"].mid.fmeasure)\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Rouge(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/google-research/google-research/tree/master/rouge"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/ROUGE_(metric)""",
"""https://github.com/google-research/google-research/tree/master/rouge""",
] , )
    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
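# Behaviour sketch (illustrative, not part of the original script): with
# use_aggregator=False, _compute returns one Score per input pair instead of
# bootstrap aggregates, e.g.
#
#     metric = datasets.load_metric("rouge")
#     out = metric.compute(predictions=["hello there"], references=["hello there"],
#                          use_aggregator=False)
#     # out["rouge1"] is a list with one Score(precision=1.0, recall=1.0, fmeasure=1.0)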
| 63 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
"""simple docstring"""
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True,
                 use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
                 num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
                 type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RobertaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("""roberta-base""", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 63 | 1 |
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("""0.11.0""").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="""dataset""", revision=revision)
| 63 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)
class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    rules_dict = {}
    rule_list = step_rules.split(""",""")
    for rule_str in rule_list[:-1]:
        value_str, value = rule_str.split(""":""")
        steps = int(value_str)
        lr_multiple = float(value)
        rules_dict[steps] = lr_multiple
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, lr_end=1E-7, power=1.0, last_epoch=-1):
    lr_init = optimizer.defaults["""lr"""]
    if not (lr_init > lr_end):
        raise ValueError(f"""lr_end ({lr_end}) must be smaller than initial lr ({lr_init})""")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""")
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""")
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, num_cycles=num_cycles, last_epoch=last_epoch, )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, power=power, last_epoch=last_epoch, )
    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch)
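# Quick usage sketch for the factory above; the dummy parameter, optimizer
# choice, and step counts are arbitrary placeholders.
import torch

optimizer = torch.optim.AdamW([torch.nn.Parameter(torch.zeros(1))], lr=1e-3)
scheduler = get_scheduler("linear", optimizer, num_warmup_steps=10, num_training_steps=100)
lrs = []
for _ in range(100):
    optimizer.step()
    scheduler.step()
    lrs.append(scheduler.get_last_lr()[0])
print(lrs[9], lrs[-1])  # LR peaks after the 10-step warmup, then decays linearly toward 0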
| 63 | 1 |
def gnome_sort(lst: list) -> list:
    if len(lst) <= 1:
        return lst
    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(gnome_sort(unsorted))
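# A small randomized check of gnome_sort against the built-in sort; this
# harness is illustrative and not part of the original script.
import random

for _ in range(100):
    data = [random.randint(-50, 50) for _ in range(random.randint(0, 20))]
    assert gnome_sort(list(data)) == sorted(data)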
| 63 |
from math import pi, sqrt
def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError("""math domain error""")
    if num > 171.5:
        raise OverflowError("""math range error""")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("""num must be an integer or a half-integer""")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
print(f"""gamma({num}) = {gamma(num)}""")
print("\nEnter 0 to exit...")
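# Cross-checking the recursion above against math.gamma; the probe values and
# the tolerance are arbitrary choices for this illustration.
import math

for x in (0.5, 1, 2, 3.5, 7):
    assert math.isclose(gamma(x), math.gamma(x), rel_tol=1e-9)
print(gamma(3.5))  # 2.5 * 1.5 * 0.5 * sqrt(pi), about 3.3234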
| 63 | 1 |
def apply_table(inp, table):
    res = """"""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    return data[1:] + data[0]


def xor(a, b):
    res = """"""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    row = int("""0b""" + data[0] + data[-1], 2)
    col = int("""0b""" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right
if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")
    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)
    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)
    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
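# The three primitives above in isolation, as a self-contained sketch: tables
# are 1-indexed permutations, shifts are circular, and XOR works on bit strings.
def _apply_table(inp: str, table: list) -> str:
    return "".join(inp[i - 1] for i in table)

def _left_shift(data: str) -> str:
    return data[1:] + data[0]

def _xor(a: str, b: str) -> str:
    return "".join("0" if x == y else "1" for x, y in zip(a, b))

print(_apply_table("10100101", [2, 6, 3, 1, 4, 8, 5, 7]))  # "01110100"
print(_left_shift("10100"))                                # "01001"
print(_xor("1010", "0110"))                                # "1100"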
| 63 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None
def make_tree() -> Node:
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree
def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []
def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []
def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []
def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0
def level_order(root: Node | None) -> list[Any]:
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def get_nodes_from_left_to_right(root: Node | None, level: int) -> list[Any]:
    output: list[Any] = []
    def populate_output(root: Node | None, level: int) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
    populate_output(root, level)
return output
def get_nodes_from_right_to_left(root: Node | None, level: int) -> list[Any]:
    output: list[Any] = []
    def populate_output(root: Node | None, level: int) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
    populate_output(root, level)
return output
def zigzag(root: Node | None) -> list[Any]:
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root)
for h in range(1 , height_tree + 1 ):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
return output
def main() -> None:  # Main function for testing.
    tree = make_tree()
    print(f"""In-order Traversal: {inorder(tree)}""")
    print(f"""Pre-order Traversal: {preorder(tree)}""")
    print(f"""Post-order Traversal: {postorder(tree)}""", """\n""")
    print(f"""Height of Tree: {height(tree)}""", """\n""")
    print("""Complete Level Order Traversal: """)
    print(level_order(tree), """\n""")
    print("""Level-wise order Traversal: """)
    for level in range(1, height(tree) + 1):
        print(f"""Level {level}:""", get_nodes_from_left_to_right(tree, level=level))
    print("""\nZigZag order Traversal: """)
    print(zigzag(tree))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
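# A compact cross-check of the zigzag order on the same 5-node tree, using an
# explicit breadth-first pass instead of per-level recursion; it reuses the
# Node/make_tree definitions above.
def zigzag_bfs(root: Node | None) -> list:
    out, level, left_to_right = [], [root] if root else [], True
    while level:
        vals = [node.data for node in level]
        out.append(vals if left_to_right else vals[::-1])
        level = [child for node in level for child in (node.left, node.right) if child]
        left_to_right = not left_to_right
    return out

print(zigzag_bfs(make_tree()))  # [[1], [3, 2], [4, 5]], matching zigzag(make_tree())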
| 63 | 1 |
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp(self):
super().setUp()
        vocab_tokens = [
"""<unk>""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""unwanted""",
"""wa""",
"""un""",
"""running""",
""",""",
"""low""",
"""l""",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
    def get_tokenizer(self, **kwargs):
        kwargs["""lower_case"""] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = """<unk> UNwanted , running"""
        output_text = """<unk> unwanted, running"""
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)
        tokens = tokenizer.tokenize("""<unk> UNwanted , running""")
        self.assertListEqual(tokens, ["""<unk>""", """unwanted""", """,""", """running"""])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])
    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo ! how  \n Are yoU ?  """), ["""hello""", """!""", """how""", """are""", """you""", """?"""])
    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo ! how  \n Are yoU ?  """), ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""])
    def test_full_tokenizer_moses(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = """Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"""
        tokens_out = [
"""Hello""",
"""(""",
"""bracket""",
""")""",
"""and""",
"""side""",
"""@-@""",
"""scrolled""",
"""[""",
"""and""",
"""]""",
"""Henry""",
"""'s""",
"""$""",
"""5""",
"""@,@""",
"""000""",
"""with""",
"""3""",
"""@.@""",
"""34""",
"""m""",
""".""",
"""What""",
"""'s""",
"""up""",
"""!""",
"""?""",
]
        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)
    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)
        tokenizer.add_tokens(["""new1""", """new2"""])
        tokenizer.move_added_token("""new1""", 1)
        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("""new1"""), [1])
        self.assertEqual(tokenizer.decode([1]), """new1""")
| 63 |
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {'do_clean_text': False, 'add_prefix_space': False}
    def setUp(self):
super().setUp()
# fmt: off
        vocab_tokens = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
        # fmt: on
        emoji_tokens = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}}  # 😀
        self.special_tokens_map = {"""unk_token""": """<unk>"""}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""emoji_file"""])
        with open(self.vocab_file, """w""", encoding="""utf-8""") as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens]))
        with open(self.emoji_file, """w""") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
        output_text = """こんにちは、世界。 \nこんばんは、世界。😀"""
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant
    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant
    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant
def UpperCAmelCase ( self : str ) -> Tuple:
__UpperCAmelCase : List[str] = self.get_tokenizer()
# Testing tokenization
__UpperCAmelCase : int = """こんにちは、世界。 こんばんは、㔺界。"""
__UpperCAmelCase : Dict = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
__UpperCAmelCase : Optional[Any] = tokenizer.tokenize(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
# Testing conversion to ids without special tokens
__UpperCAmelCase : List[str] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
__UpperCAmelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
# Testing conversion to ids with special tokens
__UpperCAmelCase : List[Any] = tokens + [tokenizer.unk_token]
__UpperCAmelCase : str = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
__UpperCAmelCase : Any = tokenizer.convert_tokens_to_ids(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
def UpperCAmelCase ( self : Tuple ) -> Dict:
__UpperCAmelCase : int = self.get_tokenizer()
# Testing tokenization
__UpperCAmelCase : Tuple = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
__UpperCAmelCase : int = """こんにちは、、、、世界。こんばんは、、、、世界。"""
__UpperCAmelCase : Tuple = tokenizer.encode(__lowercase )
__UpperCAmelCase : int = tokenizer.decode(__lowercase )
self.assertEqual(__lowercase , __lowercase )
@slow
def UpperCAmelCase ( self : int ) -> Optional[int]:
__UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
__UpperCAmelCase : List[Any] = """こんにちは、世界。"""
__UpperCAmelCase : Optional[int] = """こんばんは、㔺界。😀"""
__UpperCAmelCase : List[Any] = """こんにちは、世界。こんばんは、世界。😀"""
__UpperCAmelCase : List[str] = tokenizer.encode(prefix_text + input_text )
__UpperCAmelCase : List[Any] = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
__UpperCAmelCase : Any = tokenizer.encode(__lowercase , prefix_text=__lowercase )
__UpperCAmelCase : Optional[int] = tokenizer.decode(__lowercase )
__UpperCAmelCase : Any = tokenizer.decode(__lowercase )
__UpperCAmelCase : Optional[Any] = tokenizer.decode(__lowercase )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
@slow
def UpperCAmelCase ( self : Any ) -> str:
__UpperCAmelCase : int = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
__UpperCAmelCase : int = """こんにちは、世界。"""
__UpperCAmelCase : List[Any] = """こんばんは、㔺界。😀"""
__UpperCAmelCase : Union[str, Any] = len(tokenizer.encode(__lowercase ) ) - 2
__UpperCAmelCase : int = len(tokenizer.encode(__lowercase ) ) - 2
__UpperCAmelCase : List[Any] = [1] + [0] * (len_prefix + len_text + 1)
__UpperCAmelCase : Union[str, Any] = [1] * (len_prefix + len_text + 1) + [0]
__UpperCAmelCase : List[Any] = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
__UpperCAmelCase : Union[str, Any] = tokenizer(prefix_text + input_text ).token_type_ids
__UpperCAmelCase : Optional[Any] = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
__UpperCAmelCase : Tuple = tokenizer(__lowercase , prefix_text=__lowercase ).token_type_ids
self.assertListEqual(__lowercase , __lowercase )
self.assertListEqual(__lowercase , __lowercase )
self.assertListEqual(__lowercase , __lowercase )
@slow
def UpperCAmelCase ( self : List[str] ) -> int:
__UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
__UpperCAmelCase : Optional[int] = tokenizer.encode("""あンいワ""" )
__UpperCAmelCase : Tuple = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
__UpperCAmelCase : Optional[int] = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
self.assertEqual(tokenizer.decode(__lowercase ) , tokenizer.decode(__lowercase ) )
self.assertEqual(tokenizer.decode(__lowercase ) , tokenizer.decode(__lowercase ) )
self.assertNotEqual(__lowercase , __lowercase )
self.assertNotEqual(__lowercase , __lowercase )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def UpperCAmelCase ( self : List[Any] ) -> List[str]:
__UpperCAmelCase : Any = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
__UpperCAmelCase : List[Any] = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
__UpperCAmelCase : int = tokenizer(__lowercase , padding=__lowercase )
__UpperCAmelCase : Optional[Any] = tokenizer.batch_encode_plus(__lowercase , padding=__lowercase )
# fmt: off
__UpperCAmelCase : Optional[int] = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
__UpperCAmelCase : Tuple = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
__UpperCAmelCase : Union[str, Any] = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , __lowercase )
self.assertListEqual(x_token.token_type_ids , __lowercase )
self.assertListEqual(x_token.attention_mask , __lowercase )
self.assertListEqual(x_token_a.input_ids , __lowercase )
self.assertListEqual(x_token_a.token_type_ids , __lowercase )
self.assertListEqual(x_token_a.attention_mask , __lowercase )
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def UpperCAmelCase ( self : Any ) -> int:
# tokenizer has no padding token
pass
| 63 | 1 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
| 63 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)
@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
"""simple docstring"""
    deprecated_args = [
'no_inference',
'no_cuda',
'no_tpu',
'no_speed',
'no_memory',
'no_env_print',
'no_multi_process',
]
def __init__( self : List[Any] , **__lowercase : Dict ) -> Tuple:
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"""{deprecated_arg} is deprecated. Please use --no_{positive_arg} or"""
                    f""" {positive_arg}={kwargs[positive_arg]}""")
        self.torchscript = kwargs.pop("""torchscript""", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("""torch_xla_tpu_print_metrics""", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("""fp16_opt_level""", self.fp16_opt_level)
        super().__init__(**kwargs)
    torchscript: bool = field(default=False, metadata={'help': 'Trace the models using torchscript'})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={'help': 'Print Xla/PyTorch tpu metrics'})
    fp16_opt_level: str = field(
        default='O1', metadata={
            'help': (
                'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '
                'See details at https://nvidia.github.io/apex/amp.html'
            )
        }, )
    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["""torch"""])
        logger.info("""PyTorch: setting up devices""")
        if not self.cuda:
            device = torch.device("""cpu""")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu
    @property
    def is_tpu(self) -> bool:
        return is_torch_tpu_available() and self.tpu
    @property
    def device_idx(self) -> int:
        requires_backends(self, ["""torch"""])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()
    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["""torch"""])
        return self._setup_devices[0]
    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["""torch"""])
        return self._setup_devices[1]
    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
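# Minimal illustration of the negative-flag migration handled in __init__
# above: a legacy "no_x=True" kwarg becomes attribute "x=False". The class and
# flag names below are made up for the demo.
class LegacyArgs:
    deprecated_args = ["no_inference", "no_speed"]
    inference = True
    speed = True

    def __init__(self, **kwargs):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]  # strip the "no_" prefix
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))

print(LegacyArgs(no_speed=True).speed)  # False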
| 63 | 1 |
encode_dict = {
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
    encoded = """"""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("""encode() accepts only letters of the alphabet and spaces""")
    return encoded
def decode(coded: str) -> str:
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("""decode() accepts only 'A', 'B' and spaces""")
    decoded = """"""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
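# The classical 26-letter Bacon alphabet can also be generated instead of
# hand-written; note this standard variant differs from the table above, which
# remaps a few letters (e.g. "j" and "v"). Self-contained sketch:
gen_encode = {chr(ord("a") + i): format(i, "05b").translate(str.maketrans("01", "AB")) for i in range(26)}
gen_decode = {v: k for k, v in gen_encode.items()}
coded = " ".join(gen_encode[c] for c in "bacon")
print(coded)  # AAAAB AAAAA AAABA ABBBA ABBAB
print("".join(gen_decode[code] for code in coded.split()))  # bacon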
| 63 |
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
assert isinstance(__lowerCamelCase , __lowerCamelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / """cache"""
    expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            """dataset""", """sqlite:///""" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_sql_dataset(dataset, expected_features)
@require_sqlalchemy
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / """cache"""
    default_expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("""dataset""", """sqlite:///""" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)
def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
cur.execute("""SELECT * FROM dataset""" )
for row in cur:
yield row
@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / """cache"""
    output_sqlite_path = os.path.join(cache_dir, """tmp.sql""")
    dataset = SqlDatasetReader("""dataset""", """sqlite:///""" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, """dataset""", """sqlite:///""" + output_sqlite_path, num_proc=1).write()
    original_rows = iter_sql_file(sqlite_path)
    expected_rows = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_rows, expected_rows):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / """cache"""
    output_sqlite_path = os.path.join(cache_dir, """tmp.sql""")
    dataset = SqlDatasetReader("""dataset""", """sqlite:///""" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, """dataset""", """sqlite:///""" + output_sqlite_path, num_proc=2).write()
    original_rows = iter_sql_file(sqlite_path)
    expected_rows = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_rows, expected_rows):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / """cache"""
    output_sqlite_path = os.path.join(cache_dir, """tmp.sql""")
    dataset = SqlDatasetReader("""dataset""", """sqlite:///""" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, """dataset""", """sqlite:///""" + output_sqlite_path, num_proc=0).write()
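# Hedged sketch of the same round trip outside pytest; the database file name
# is a placeholder and `sqlalchemy` is assumed to be installed.
from datasets import Dataset

ds = Dataset.from_dict({"col_1": ["0", "1"], "col_2": [0, 1], "col_3": [0.0, 1.0]})
ds.to_sql("dataset", "sqlite:///roundtrip.db")               # write via SqlDatasetWriter
ds2 = Dataset.from_sql("dataset", "sqlite:///roundtrip.db")  # read back via SqlDatasetReader
assert ds.to_dict() == ds2.to_dict()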
| 63 | 1 |
import numpy
# List of input, output pairs
train_data = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def _error(example_no, data_set="train"):
    return calculate_hypothesis_value(example_no, data_set) - output(
        example_no, data_set)
def _hypothesis_value(data_input_tuple):
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val
def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None
def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None
def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value
def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value
def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector, temp_parameter_vector, atol=absolute_error_limit, rtol=relative_error_limit, ):
            break
        parameter_vector = temp_parameter_vector
    print(("""Number of iterations:""", j))
def test_gradient_descent():
    for i in range(len(test_data)):
        print(("""Actual output value:""", output(i, """test""")))
        print(("""Hypothesis output:""", calculate_hypothesis_value(i, """test""")))
if __name__ == "__main__":
run_gradient_descent()
print("\nTesting gradient descent for a linear hypothesis function.\n")
test_gradient_descent()
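# The same batch gradient descent in vectorized NumPy form; the data matches
# train_data above, while the variable names and iteration cap are mine.
import numpy as np

X = np.array([[5, 2, 3], [6, 5, 9], [11, 12, 13], [1, 1, 1], [11, 12, 13]], dtype=float)
y = np.array([15, 25, 41, 8, 41], dtype=float)
Xb = np.hstack([np.ones((len(X), 1)), X])  # prepend a bias column for w[0]
w = np.array([2.0, 4.0, 1.0, 5.0])         # same start as parameter_vector
for _ in range(200_000):                   # cap added for safety
    grad = Xb.T @ (Xb @ w - y) / len(y)    # mean gradient of squared error
    w_new = w - 0.009 * grad               # LEARNING_RATE from the script
    if np.allclose(w, w_new, atol=0.000002, rtol=0):
        break
    w = w_new
print(w)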
| 63 |
from __future__ import annotations
seive = [True] * 1_000_001
i = 2
while i * i <= 1_000_000:
    if seive[i]:
        for j in range(i * i, 1_000_001, i):
            seive[j] = False
    i += 1
def is_prime(n: int) -> bool:
    return seive[n]
def contains_an_even_digit(n: int) -> bool:
    return any(digit in """02468""" for digit in str(n))
def find_circular_primes(limit: int = 1000000) -> list[int]:
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result
def solution() -> int:
    return len(find_circular_primes())
if __name__ == "__main__":
print(f"""{len(find_circular_primes()) = }""")
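# The rotation trick used in find_circular_primes, shown on its own: slice the
# digit string at every offset.
n = 197
s = str(n)
print([int(s[j:] + s[:j]) for j in range(len(s))])  # [197, 971, 719], all prime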
| 63 | 1 |
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 63 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PATTERNS = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)
    if k.startswith("""encoder"""):
        k = k.replace(""".attn""", """.self_attn""")
        k = k.replace("""norm1""", """self_attn_layer_norm""")
        k = k.replace("""norm2""", """final_layer_norm""")
    elif k.startswith("""decoder"""):
        k = k.replace("""norm1""", """self_attn_layer_norm""")
        k = k.replace("""norm2""", """encoder_attn_layer_norm""")
        k = k.replace("""norm3""", """final_layer_norm""")
    return k
def rename_layernorm_keys(sd):
    keys = [
        """model.encoder.layernorm_embedding.weight""",
        """model.encoder.layernorm_embedding.bias""",
        """model.decoder.layernorm_embedding.weight""",
        """model.decoder.layernorm_embedding.bias""",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("""layernorm_embedding""", """layer_norm""")
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location="""cpu""")
    sd = model["""model"""]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
a : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
a : Any = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 63 | 1 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("""env""")
    else:
        parser = argparse.ArgumentParser("""Accelerate env command""")
    parser.add_argument(
        """--config_file""", default=None, help="""The config file to use for the default values in the launching script.""")
    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser
def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()
    accelerate_config = """Not found"""
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()
    info = {
        """`Accelerate` version""": version,
        """Platform""": platform.platform(),
        """Python version""": platform.python_version(),
        """Numpy version""": np.__version__,
        """PyTorch version (GPU?)""": f"""{pt_version} ({pt_cuda_available})""",
        """PyTorch XPU available""": str(pt_xpu_available),
        """PyTorch NPU available""": str(pt_npu_available),
        """System RAM""": f"""{psutil.virtual_memory().total / 1024 ** 3:.2f} GB""",
    }
    if pt_cuda_available:
        info["""GPU type"""] = torch.cuda.get_device_name()
    print("""\nCopy-and-paste the text below in your GitHub issue\n""")
    print("""\n""".join([f"""- {prop}: {val}""" for prop, val in info.items()]))
    print("""- `Accelerate` default config:""" if args.config_file is None else """- `Accelerate` config passed:""")
    accelerate_config_str = (
        """\n""".join([f"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"""\t{accelerate_config}"""
    )
    print(accelerate_config_str)
    info["""`Accelerate` configs"""] = accelerate_config
    return info
def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0
return 0
if __name__ == "__main__":
raise SystemExit(main())
| 63 |
def interpolation_search(sorted_collection, item):
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
def interpolation_search_by_recursion(sorted_collection, item, left, right):
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None
    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, point)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(
                sorted_collection, item, point + 1, right)
def __assert_sorted(collection):
    if collection != sorted(collection):
        raise ValueError("""Collection must be ascending sorted""")
    return True
if __name__ == "__main__":
import sys
    debug = 0
if debug == 1:
        collection = [10, 30, 40, 45, 50, 66, 77, 93]
try:
__assert_sorted(collection)
except ValueError:
sys.exit("Sequence must be ascending sorted to apply interpolation search")
    target = 67
    result = interpolation_search(collection, target)
if result is not None:
print(f"""{target} found at positions: {result}""")
else:
print("Not found")
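# The probe index comes from linear interpolation between the end points; a
# quick standalone trace on the debug collection above:
A = [10, 30, 40, 45, 50, 66, 77, 93]
item = 67
left, right = 0, len(A) - 1
point = left + ((item - A[left]) * (right - left)) // (A[right] - A[left])
print(point, A[point])  # 4 50: the first probe lands near the target
print(interpolation_search(A, item))  # None, since 67 is absent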
| 63 | 1 |
from manim import *
class a ( lowercase__ ):
"""simple docstring"""
def UpperCAmelCase ( self : Optional[Any] ) -> str:
__UpperCAmelCase : Tuple = Rectangle(height=0.5 , width=0.5 )
__UpperCAmelCase : Optional[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__UpperCAmelCase : Any = [mem.copy() for i in range(6 )]
__UpperCAmelCase : str = [mem.copy() for i in range(6 )]
__UpperCAmelCase : Union[str, Any] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
__UpperCAmelCase : Union[str, Any] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
__UpperCAmelCase : List[str] = VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 )
__UpperCAmelCase : Dict = Text("""CPU""" , font_size=24 )
__UpperCAmelCase : Union[str, Any] = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__lowercase )
__UpperCAmelCase : List[str] = [mem.copy() for i in range(4 )]
__UpperCAmelCase : str = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
__UpperCAmelCase : List[Any] = Text("""GPU""" , font_size=24 )
__UpperCAmelCase : Any = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
gpu.move_to([-1, -1, 0] )
self.add(__lowercase )
__UpperCAmelCase : Dict = [mem.copy() for i in range(6 )]
__UpperCAmelCase : int = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
__UpperCAmelCase : List[Any] = Text("""Model""" , font_size=24 )
__UpperCAmelCase : str = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
model.move_to([3, -1.0, 0] )
self.add(__lowercase )
__UpperCAmelCase : Any = []
for i, rect in enumerate(__lowercase ):
rect.set_stroke(__lowercase )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
__UpperCAmelCase : List[Any] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__lowercase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__lowercase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__lowercase , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__lowercase , buff=0.0 )
self.add(__lowercase )
cpu_targs.append(__lowercase )
__UpperCAmelCase : Any = [mem.copy() for i in range(6 )]
__UpperCAmelCase : Optional[int] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
__UpperCAmelCase : List[str] = Text("""Loaded Checkpoint""" , font_size=24 )
__UpperCAmelCase : List[str] = Group(__lowercase , __lowercase ).arrange(__lowercase , aligned_edge=__lowercase , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
__UpperCAmelCase : List[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__UpperCAmelCase : int = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__lowercase , __lowercase )
__UpperCAmelCase : str = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(__lowercase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
__UpperCAmelCase : Optional[Any] = MarkupText(
f"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowercase ) , Write(__lowercase ) )
self.play(Write(__lowercase , run_time=1 ) , Create(__lowercase , run_time=1 ) )
__UpperCAmelCase : List[Any] = []
__UpperCAmelCase : Dict = []
for i, rect in enumerate(__lowercase ):
__UpperCAmelCase : List[Any] = fill.copy().set_fill(__lowercase , opacity=0.7 )
target.move_to(__lowercase )
first_animations.append(GrowFromCenter(__lowercase , run_time=1 ) )
__UpperCAmelCase : Any = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__lowercase , run_time=1.5 ) )
self.play(*__lowercase )
self.play(*__lowercase )
self.wait()
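# Rendering note (assumption: Manim Community is installed): a Scene subclass
# like the one above is usually previewed from the shell with
#     manim -pql <this_file>.py <SceneName>
# where -pql plays a low-quality render once it finishes.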
| 63 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
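# For reference, a simplified sketch of what the dummy fallback objects look
# like. The real ones in diffusers are generated via DummyObject and
# requires_backends; the class below is an illustrative stand-in only:
class _DummyUnCLIPPipeline:
    def __init__(self, *args, **kwargs):
        raise ImportError(
            "UnCLIPPipeline requires `torch` and `transformers>=4.25.0` to be installed."
        )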
| 63 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
a : Optional[int] = KandinskyImg2ImgPipeline
a : List[Any] = ['prompt', 'image_embeds', 'negative_image_embeds', 'image']
a : Union[str, Any] = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
]
a : Union[str, Any] = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
a : Optional[int] = False
@property
def UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
return 32
@property
def UpperCAmelCase ( self : Optional[int] ) -> Any:
return 32
@property
def UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
return self.time_input_dim
@property
def UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
return self.time_input_dim * 4
@property
def UpperCAmelCase ( self : Any ) -> Any:
return 100
@property
def UpperCAmelCase ( self : int ) -> str:
__UpperCAmelCase : List[str] = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def UpperCAmelCase ( self : Optional[int] ) -> List[str]:
torch.manual_seed(0 )
__UpperCAmelCase : Dict = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
__UpperCAmelCase : int = MultilingualCLIP(__lowercase )
__UpperCAmelCase : List[Any] = text_encoder.eval()
return text_encoder
@property
def UpperCAmelCase ( self : Optional[Any] ) -> List[Any]:
torch.manual_seed(0 )
__UpperCAmelCase : List[Any] = {
"""in_channels""": 4,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
__UpperCAmelCase : Dict = UNet2DConditionModel(**__lowercase )
return model
@property
def UpperCAmelCase ( self : Union[str, Any] ) -> str:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCAmelCase ( self : int ) -> Optional[int]:
torch.manual_seed(0 )
__UpperCAmelCase : str = VQModel(**self.dummy_movq_kwargs )
return model
def UpperCAmelCase ( self : Optional[Any] ) -> int:
__UpperCAmelCase : str = self.dummy_text_encoder
__UpperCAmelCase : List[Any] = self.dummy_tokenizer
__UpperCAmelCase : Tuple = self.dummy_unet
__UpperCAmelCase : Any = self.dummy_movq
__UpperCAmelCase : List[Any] = {
"""num_train_timesteps""": 1000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.00_085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
__UpperCAmelCase : Dict = DDIMScheduler(**__lowercase )
__UpperCAmelCase : List[Any] = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def UpperCAmelCase ( self : Any , __lowercase : List[Any] , __lowercase : int=0 ) -> Union[str, Any]:
__UpperCAmelCase : Any = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__lowercase ) ).to(__lowercase )
__UpperCAmelCase : Dict = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__lowercase )
# create init_image
__UpperCAmelCase : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowercase ) ).to(__lowercase )
__UpperCAmelCase : str = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__UpperCAmelCase : Tuple = Image.fromarray(np.uint8(__lowercase ) ).convert("""RGB""" ).resize((256, 256) )
if str(__lowercase ).startswith("""mps""" ):
__UpperCAmelCase : List[Any] = torch.manual_seed(__lowercase )
else:
__UpperCAmelCase : Tuple = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
__UpperCAmelCase : Dict = {
"""prompt""": """horse""",
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def UpperCAmelCase ( self : List[Any] ) -> str:
__UpperCAmelCase : Any = """cpu"""
__UpperCAmelCase : Union[str, Any] = self.get_dummy_components()
__UpperCAmelCase : Optional[Any] = self.pipeline_class(**__lowercase )
__UpperCAmelCase : Any = pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
__UpperCAmelCase : List[Any] = pipe(**self.get_dummy_inputs(__lowercase ) )
__UpperCAmelCase : Tuple = output.images
__UpperCAmelCase : Optional[int] = pipe(
**self.get_dummy_inputs(__lowercase ) , return_dict=__lowercase , )[0]
__UpperCAmelCase : Any = image[0, -3:, -3:, -1]
__UpperCAmelCase : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__UpperCAmelCase : int = np.array(
[0.61_474_943, 0.6_073_539, 0.43_308_544, 0.5_928_269, 0.47_493_595, 0.46_755_973, 0.4_613_838, 0.45_368_797, 0.50_119_233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self : int ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self : str ) -> str:
__UpperCAmelCase : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_img2img_frog.npy""" )
__UpperCAmelCase : Dict = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
__UpperCAmelCase : int = """A red cartoon frog, 4k"""
__UpperCAmelCase : Tuple = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(__lowercase )
__UpperCAmelCase : List[str] = KandinskyImg2ImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1""" , torch_dtype=torch.float16 )
__UpperCAmelCase : str = pipeline.to(__lowercase )
pipeline.set_progress_bar_config(disable=__lowercase )
__UpperCAmelCase : str = torch.Generator(device="""cpu""" ).manual_seed(0 )
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = pipe_prior(
__lowercase , generator=__lowercase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
__UpperCAmelCase : List[Any] = pipeline(
__lowercase , image=__lowercase , image_embeds=__lowercase , negative_image_embeds=__lowercase , generator=__lowercase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="""np""" , )
__UpperCAmelCase : int = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__lowercase , __lowercase )
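# Running note: these unittest-style cases are normally collected by pytest.
# The fast pipeline tests run by default, while the @slow/@require_torch_gpu
# class only executes with RUN_SLOW=1 set and a CUDA device available.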
| 63 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class a ( lowercase__ , lowercase__ ):
"""simple docstring"""
a : Dict = 1
@register_to_config
def __init__( self : int , __lowercase : int = 1000 , __lowercase : Optional[Union[np.ndarray, List[float]]] = None ) -> Union[str, Any]:
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(__lowercase )
# standard deviation of the initial noise distribution
__UpperCAmelCase : List[Any] = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
__UpperCAmelCase : List[Any] = 4
# running values
__UpperCAmelCase : str = []
def UpperCAmelCase ( self : Union[str, Any] , __lowercase : int , __lowercase : Union[str, torch.device] = None ) -> int:
__UpperCAmelCase : int = num_inference_steps
__UpperCAmelCase : Union[str, Any] = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
__UpperCAmelCase : Union[str, Any] = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
__UpperCAmelCase : Dict = torch.tensor(self.config.trained_betas , dtype=torch.float32 )
else:
__UpperCAmelCase : Dict = torch.sin(steps * math.pi / 2 ) ** 2
__UpperCAmelCase : List[Any] = (1.0 - self.betas**2) ** 0.5
__UpperCAmelCase : Tuple = (torch.atan2(self.betas , self.alphas ) / math.pi * 2)[:-1]
__UpperCAmelCase : Dict = timesteps.to(__lowercase )
__UpperCAmelCase : Optional[Any] = []
def UpperCAmelCase ( self : Optional[int] , __lowercase : torch.FloatTensor , __lowercase : int , __lowercase : torch.FloatTensor , __lowercase : bool = True , ) -> Union[SchedulerOutput, Tuple]:
if self.num_inference_steps is None:
raise ValueError(
"""Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" )
__UpperCAmelCase : List[str] = (self.timesteps == timestep).nonzero().item()
__UpperCAmelCase : Optional[Any] = timestep_index + 1
__UpperCAmelCase : List[str] = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(__lowercase )
if len(self.ets ) == 1:
__UpperCAmelCase : Tuple = self.ets[-1]
elif len(self.ets ) == 2:
__UpperCAmelCase : Union[str, Any] = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
__UpperCAmelCase : Union[str, Any] = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
__UpperCAmelCase : List[Any] = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
__UpperCAmelCase : Union[str, Any] = self._get_prev_sample(__lowercase , __lowercase , __lowercase , __lowercase )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__lowercase )
def UpperCAmelCase ( self : Optional[Any] , __lowercase : torch.FloatTensor , *__lowercase : Optional[Any] , **__lowercase : Any ) -> torch.FloatTensor:
return sample
def UpperCAmelCase ( self : Tuple , __lowercase : Tuple , __lowercase : List[str] , __lowercase : Optional[int] , __lowercase : Dict ) -> str:
__UpperCAmelCase : int = self.alphas[timestep_index]
__UpperCAmelCase : Tuple = self.betas[timestep_index]
__UpperCAmelCase : Any = self.alphas[prev_timestep_index]
__UpperCAmelCase : List[str] = self.betas[prev_timestep_index]
__UpperCAmelCase : List[str] = (sample - sigma * ets) / max(__lowercase , 1e-8 )
__UpperCAmelCase : List[Any] = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self : Tuple ) -> str:
return self.config.num_train_timesteps
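# Minimal denoising-loop sketch, assuming the upstream diffusers IPNDMScheduler
# that this class mirrors (a zero tensor stands in for a real model's output):
import torch
from diffusers import IPNDMScheduler

scheduler = IPNDMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(num_inference_steps=50)
sample = torch.randn(1, 3, 32, 32)  # stand-in noisy latent
for t in scheduler.timesteps:
    model_output = torch.zeros_like(sample)  # a real UNet would predict this
    sample = scheduler.step(model_output, t, sample).prev_sample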
| 63 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
a : str = {
"configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[int] = [
"ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ErnieForCausalLM",
"ErnieForMaskedLM",
"ErnieForMultipleChoice",
"ErnieForNextSentencePrediction",
"ErnieForPreTraining",
"ErnieForQuestionAnswering",
"ErnieForSequenceClassification",
"ErnieForTokenClassification",
"ErnieModel",
"ErniePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
a : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
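# The _LazyModule trick above defers submodule imports until first attribute
# access. A stripped-down illustration of the idea (simplified sketch, not the
# real transformers implementation):
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: module
            for module, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, attr):
        if attr not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(
            "." + self._symbol_to_module[attr], self.__name__
        )
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value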
| 63 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def lowerCamelCase__ ( ):
__UpperCAmelCase : Union[str, Any] = ArgumentParser("""Transformers CLI tool""" , usage="""transformers-cli <command> [<args>]""" )
__UpperCAmelCase : Any = parser.add_subparsers(help="""transformers-cli command helpers""" )
# Register commands
ConvertCommand.register_subcommand(__lowerCamelCase )
DownloadCommand.register_subcommand(__lowerCamelCase )
EnvironmentCommand.register_subcommand(__lowerCamelCase )
RunCommand.register_subcommand(__lowerCamelCase )
ServeCommand.register_subcommand(__lowerCamelCase )
UserCommands.register_subcommand(__lowerCamelCase )
AddNewModelCommand.register_subcommand(__lowerCamelCase )
AddNewModelLikeCommand.register_subcommand(__lowerCamelCase )
LfsCommands.register_subcommand(__lowerCamelCase )
PTtoTFCommand.register_subcommand(__lowerCamelCase )
# Let's go
__UpperCAmelCase : Optional[Any] = parser.parse_args()
if not hasattr(__lowerCamelCase , """func""" ):
parser.print_help()
exit(1 )
# Run
__UpperCAmelCase : Tuple = args.func(__lowerCamelCase )
service.run()
if __name__ == "__main__":
main()
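# When packaged as the `transformers-cli` console entry point, the commands
# registered above become subcommands; for example `transformers-cli env`
# prints the environment report produced by EnvironmentCommand.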
| 63 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : List[str] = logging.get_logger(__name__)
a : Optional[Any] = {
"microsoft/unispeech-large-1500h-cv": (
"https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class a ( lowercase__ ):
"""simple docstring"""
a : Tuple = 'unispeech'
def __init__( self : Tuple , __lowercase : int=32 , __lowercase : int=768 , __lowercase : str=12 , __lowercase : int=12 , __lowercase : Any=3072 , __lowercase : List[Any]="gelu" , __lowercase : int=0.1 , __lowercase : Optional[Any]=0.1 , __lowercase : Optional[Any]=0.1 , __lowercase : Union[str, Any]=0.0 , __lowercase : List[Any]=0.0 , __lowercase : Any=0.1 , __lowercase : Optional[int]=0.1 , __lowercase : List[str]=0.02 , __lowercase : Optional[Any]=1e-5 , __lowercase : Tuple="group" , __lowercase : Optional[int]="gelu" , __lowercase : Tuple=(512, 512, 512, 512, 512, 512, 512) , __lowercase : Union[str, Any]=(5, 2, 2, 2, 2, 2, 2) , __lowercase : Union[str, Any]=(10, 3, 3, 3, 3, 2, 2) , __lowercase : Optional[int]=False , __lowercase : List[Any]=128 , __lowercase : Tuple=16 , __lowercase : List[Any]=False , __lowercase : List[str]=True , __lowercase : str=0.05 , __lowercase : int=10 , __lowercase : Dict=2 , __lowercase : Union[str, Any]=0.0 , __lowercase : int=10 , __lowercase : List[Any]=0 , __lowercase : Optional[Any]=320 , __lowercase : Optional[int]=2 , __lowercase : Tuple=0.1 , __lowercase : str=100 , __lowercase : Union[str, Any]=256 , __lowercase : Any=256 , __lowercase : str=0.1 , __lowercase : Union[str, Any]="mean" , __lowercase : Union[str, Any]=False , __lowercase : Dict=False , __lowercase : List[str]=256 , __lowercase : List[str]=80 , __lowercase : Optional[int]=0 , __lowercase : Dict=1 , __lowercase : str=2 , __lowercase : Tuple=0.5 , **__lowercase : Optional[Any] , ) -> int:
super().__init__(**__lowercase , pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase )
__UpperCAmelCase : Union[str, Any] = hidden_size
__UpperCAmelCase : str = feat_extract_norm
__UpperCAmelCase : Tuple = feat_extract_activation
__UpperCAmelCase : List[Any] = list(__lowercase )
__UpperCAmelCase : Union[str, Any] = list(__lowercase )
__UpperCAmelCase : Optional[Any] = list(__lowercase )
__UpperCAmelCase : str = conv_bias
__UpperCAmelCase : str = num_conv_pos_embeddings
__UpperCAmelCase : Optional[int] = num_conv_pos_embedding_groups
__UpperCAmelCase : str = len(self.conv_dim )
__UpperCAmelCase : Tuple = num_hidden_layers
__UpperCAmelCase : Any = intermediate_size
__UpperCAmelCase : Optional[int] = hidden_act
__UpperCAmelCase : Optional[Any] = num_attention_heads
__UpperCAmelCase : Dict = hidden_dropout
__UpperCAmelCase : Optional[Any] = attention_dropout
__UpperCAmelCase : Any = activation_dropout
__UpperCAmelCase : int = feat_proj_dropout
__UpperCAmelCase : Tuple = final_dropout
__UpperCAmelCase : Union[str, Any] = layerdrop
__UpperCAmelCase : int = layer_norm_eps
__UpperCAmelCase : Optional[int] = initializer_range
__UpperCAmelCase : Tuple = num_ctc_classes
__UpperCAmelCase : Any = vocab_size
__UpperCAmelCase : Tuple = do_stable_layer_norm
__UpperCAmelCase : List[str] = use_weighted_layer_sum
__UpperCAmelCase : Optional[int] = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__UpperCAmelCase : int = apply_spec_augment
__UpperCAmelCase : Any = mask_time_prob
__UpperCAmelCase : str = mask_time_length
__UpperCAmelCase : List[Any] = mask_time_min_masks
__UpperCAmelCase : List[str] = mask_feature_prob
__UpperCAmelCase : Optional[Any] = mask_feature_length
__UpperCAmelCase : int = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__UpperCAmelCase : Optional[Any] = num_codevectors_per_group
__UpperCAmelCase : List[Any] = num_codevector_groups
__UpperCAmelCase : Any = contrastive_logits_temperature
__UpperCAmelCase : List[Any] = feat_quantizer_dropout
__UpperCAmelCase : List[str] = num_negatives
__UpperCAmelCase : Dict = codevector_dim
__UpperCAmelCase : Tuple = proj_codevector_dim
__UpperCAmelCase : int = diversity_loss_weight
# ctc loss
__UpperCAmelCase : Tuple = ctc_loss_reduction
__UpperCAmelCase : Tuple = ctc_zero_infinity
# pretraining loss
__UpperCAmelCase : List[Any] = replace_prob
@property
def UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
return functools.reduce(operator.mul , self.conv_stride , 1 )
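# Hedged usage sketch, assuming the upstream transformers UniSpeechConfig that
# this snippet mirrors; defaults are overridden selectively at construction:
from transformers import UniSpeechConfig

config = UniSpeechConfig(vocab_size=32, num_hidden_layers=6)
print(config.num_feat_extract_layers)  # 7, derived from len(conv_dim)
print(config.inputs_to_logits_ratio)  # 320 = 5 * 2**6, the product of conv_stride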
| 63 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a : List[str] = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Dict = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[Any] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[Any] = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
a : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 63 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
a : List[Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[Any] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
a : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
a : str = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Any = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
a : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 | 1 |
import os
def largest_product(grid):
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )

            max_product = max(
                vert_product, horz_product, lr_diag_product, rl_diag_product
            )
            if max_product > largest:
                largest = max_product
    return largest


def solution():
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))
    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]
    return largest_product(grid)
if __name__ == "__main__":
print(solution())
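# Sanity check (hypothetical values): on the 4x4 grid
#     [[ 1,  2,  3,  4],
#      [ 5,  6,  7,  8],
#      [ 9, 10, 11, 12],
#      [13, 14, 15, 16]]
# largest_product returns 43680 = 13 * 14 * 15 * 16, the bottom-row run.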
| 63 |
def prime_sieve_eratosthenes(num: int):
    if num <= 0:
        raise ValueError("Input must be a positive integer")
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
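# Worked example for the sieve above:
#     prime_sieve_eratosthenes(30)
#     -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]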
| 63 | 1 |