code
stringlengths 81
54k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
|---|---|---|---|---|
import numpy
# List of (input features, expected output) pairs used to fit the hypothesis.
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
# Held-out examples used only for reporting predictions after training.
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
# parameter_vector[0] is the bias term; [1:] are the feature weights.
parameter_vector = [2, 4, 1, 5]
# Number of training examples.
m = len(train_data)
# Step size for gradient descent updates.
LEARNING_RATE = 0.009
def _error(example_no, data_set="train"):
    """Return (predicted - actual) output for one example.

    :param example_no: index of the example within the chosen data set
    :param data_set: "train" or "test", selecting which data set to read
    """
    return calculate_hypothesis_value(example_no, data_set) - output(
        example_no, data_set
    )
def _hypothesis_value(data_input_tuple):
    """Evaluate the linear hypothesis on one feature tuple.

    Computes dot(features, parameter_vector[1:]) + parameter_vector[0]
    (the first parameter is the bias term, so the loop runs over the
    len(parameter_vector) - 1 feature weights).
    """
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val
def output(example_no, data_set):
    """Return the recorded (actual) output of an example.

    :param example_no: index into the chosen data set
    :param data_set: "train" or "test"; any other value yields None
    """
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None
def calculate_hypothesis_value(example_no, data_set):
    """Return the hypothesis prediction for an example.

    :param example_no: index into the chosen data set
    :param data_set: "train" or "test"; any other value yields None
    """
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None
def summation_of_cost_derivative(index, end=m):
    """Sum the per-example error terms of the cost derivative.

    :param index: feature index; -1 means the bias term (error is not
        scaled by any feature value)
    :param end: number of training examples to sum over (defaults to m)
    """
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value
def get_cost_derivative(index):
    """Return the mean cost derivative for one parameter (index -1 = bias)."""
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value
def run_gradient_descent():
    """Run batch gradient descent until the parameter vector converges.

    Updates the module-level ``parameter_vector`` in place and prints the
    number of iterations taken.
    """
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            # i - 1 maps slot 0 to the bias derivative (index -1).
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        # Stop once the update no longer moves the parameters beyond tolerance.
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))
def test_gradient_descent():
    """Print actual vs. predicted output for every test example."""
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
    # Fit the hypothesis on train_data, then report predictions on test_data.
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
| 701
|
import pytest
# Name under which the dummy dataset loading script is written to disk.
DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"
# Source of the dummy dataset loading script. NOTE(review): the indentation
# inside this embedded script appears collapsed to single spaces by whatever
# produced this file — verify against the original before executing it.
DATASET_LOADING_SCRIPT_CODE = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def dataset_loading_script_name():
    """Fixture exposing the dummy dataset script's module name."""
    return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def dataset_loading_script_code():
    """Fixture exposing the dummy dataset script's source code."""
    return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    """Write the dummy loading script under tmp_path and return its path.

    Creates ``<tmp_path>/datasets/<script_name>/<script_name>.py`` containing
    the dummy dataset code and returns the script path as a string.
    """
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    # parents=True so the intermediate "datasets" directory is created too.
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_path)
| 57
| 0
|
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    """Stub whose forward() takes the model inputs as contiguous arguments."""

    def forward(self, input_ids, token_type_ids, attention_mask):
        return None
class FuncNonContiguousArgs:
    """Stub whose forward() interleaves a non-input arg among the model inputs."""

    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None
class OnnxExportTestCase(unittest.TestCase):
    """Tests for transformers.convert_graph_to_onnx: export, quantization,
    dynamic-axis inference and argument validation."""

    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        """Export *model* to ONNX in a temp dir and return the output path;
        fails the test on any exception."""
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)

            return path
        except Exception as e:
            self.fail(e)

    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})

    def test_ensure_valid_input(self):
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_filename(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
| 702
|
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
    # Torch-backed pipelines need both transformers and torch installed.
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Fall back to dummy placeholder objects that raise a helpful error on use.
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .multicontrolnet import MultiControlNetModel
    from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
    from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline

# The Flax pipeline is only exposed when both transformers and flax are present.
if is_transformers_available() and is_flax_available():
    from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 57
| 0
|
'''simple docstring'''
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
# Module-level logger used by the weight-loading helpers below.
logger = logging.get_logger("transformers.models.speecht5")
# fairseq -> HF key mappings for each model component. Keys/values may contain
# "*" wildcards, resolved per-layer in recursively_load_weights().
MAPPING_SPEECH_ENCODER_PRENET = {
    "speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
    "speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
    "speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
    "speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
MAPPING_TEXT_ENCODER_PRENET = {
    "text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
    "text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
MAPPING_SPEECH_DECODER_PRENET = {
    "speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
    "speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
    "speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
    "speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
    "speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
MAPPING_SPEECH_DECODER_POSTNET = {
    "speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
    "speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
    "speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
    "speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
    "speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
    "speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
    "speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
    "speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
    "speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
    "speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
    "speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
    "speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
MAPPING_TEXT_DECODER_PRENET = {
    "text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
MAPPING_TEXT_DECODER_POSTNET = {
    "text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
MAPPING_ENCODER = {
    "encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
    "encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
    "encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
    "encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
    "encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
    "encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
    "encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
    "encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
    "encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
MAPPING_DECODER = {
    "decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
    "decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
    "decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
    "decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
    "decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
    "decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
    "decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
    "decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
    "decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
    "decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
    "decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
    "decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
    "decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
# Per-task mappings: speech-to-text, text-to-speech, speech-to-speech.
MAPPING_S2T = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_TEXT_DECODER_PRENET,
    **MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
    **MAPPING_TEXT_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
# fairseq keys that have no HF counterpart and are dropped for every task.
IGNORE_KEYS = [
    "encoder.version",
    "encoder.layers.*.norm_k.weight",
    "encoder.layers.*.norm_k.bias",
    "decoder.version",
    "decoder.layers.*.norm_k.weight",
    "decoder.layers.*.norm_k.bias",
    "decoder.pos_emb.pe_k",
    "speech_encoder_prenet.embed_positions._float_tensor",
    "text_decoder_prenet.embed_positions._float_tensor",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
    "encoder.proj",
    "text_encoder_prenet.*",
    "speech_decoder_prenet.*",
    "speech_decoder_postnet.*",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
    "encoder.proj",
    "speech_encoder_prenet.*",
    "text_decoder_prenet.*",
    "text_decoder_postnet.*",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
    "encoder.proj",
    "text_encoder_prenet.*",
    "text_decoder_prenet.*",
    "text_decoder_postnet.*",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Assign *value* into the HF module located by the dotted *key* path.

    :param hf_pointer: root HF module to walk into
    :param key: dotted attribute path (e.g. "encoder.layers.0.fc1")
    :param value: tensor to copy in
    :param full_name: original fairseq key, used only for logging/errors
    :param weight_type: which leaf attribute to set ("weight", "bias",
        "running_mean", ...) or None to assign the module's .data directly
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.')
def should_ignore(name, ignore_keys):
    """Return True if the fairseq key *name* matches any ignore pattern.

    Patterns ending in ".*" match by prefix; patterns containing ".*." match
    when both the prefix and suffix occur in the name; otherwise plain
    substring matching is used.
    """
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(fairseq_dict, hf_model, task):
    """Copy all fairseq weights into *hf_model* according to the task mapping.

    :param fairseq_dict: fairseq state dict (name -> tensor)
    :param hf_model: target HF SpeechT5 model
    :param task: one of "s2t", "t2s", "s2s"; selects mapping and ignore lists
    :raises ValueError: for an unknown task
    """
    unused_weights = []

    if task == "s2t":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_encoder,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        # Recover the layer index from the fairseq name and
                        # substitute it for the wildcard in the HF key.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq feature-extractor conv/norm weight into the HF model.

    :param full_name: fairseq key containing "conv_layers.<layer>.<type>..."
    :param value: tensor to copy
    :param feature_extractor: HF feature encoder holding conv_layers
    :param unused_weights: list collecting names that could not be placed
    :param use_group_norm: whether the config uses group norm (affects which
        (type_id, layer_id) combinations carry a layer norm)
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speechta_checkpoint(
    task,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    vocab_path=None,
    repo_id=None,
):
    """Convert a fairseq SpeechT5 checkpoint to the HF format and save it.

    :param task: "s2t", "t2s" or "s2s" — selects the model class
    :param checkpoint_path: path to the fairseq checkpoint
    :param pytorch_dump_folder_path: output folder for model + processor
    :param config_path: optional path to an existing hf config.json
    :param vocab_path: optional SentencePiece model for building the tokenizer
    :param repo_id: optional hub repo to push the converted model to
    :raises ValueError: for an unknown task
    """
    if config_path is not None:
        config = SpeechTaConfig.from_pretrained(config_path)
    else:
        config = SpeechTaConfig()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechTaForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechTaForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechTaForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")

    if vocab_path:
        tokenizer = SpeechTaTokenizer(vocab_path, model_max_length=config.max_text_positions)

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

    feature_extractor = SpeechTaFeatureExtractor()
    processor = SpeechTaProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    # CLI entry point: parse conversion options and run the converter.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--task",
        default="s2t",
        type=str,
        help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
    )
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )

    args = parser.parse_args()
    convert_speechta_checkpoint(
        args.task,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.vocab_path,
        args.push_to_hub,
    )
| 703
|
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from cookiecutter.main import cookiecutter

    # Flag checked in run() to fail fast when cookiecutter is missing.
    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def add_new_model_command_factory(args: Namespace):
    """Build an AddNewModelCommand from parsed CLI arguments."""
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)
class AddNewModelCommand(BaseTransformersCLICommand):
    """`transformers-cli add-new-model`: scaffold a new model from the
    cookiecutter templates (deprecated in favour of add-new-model-like)."""

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register the `add-new-model` subcommand and its options."""
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
        )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)
def __init__( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=None ,*UpperCamelCase ) -> Any:
snake_case__ :Union[str, Any] = testing
snake_case__ :Union[str, Any] = testing_file
snake_case__ :List[str] = path
def lowerCAmelCase_ ( self ) -> List[Any]:
warnings.warn(
"The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
"It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
"checks, you should use `transformers-cli add-new-model-like` instead." )
if not _has_cookiecutter:
raise ImportError(
"Model creation dependencies are required to use the `add_new_model` command. Install them by running "
"the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n" )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
snake_case__ :Tuple = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
if len(UpperCamelCase ) > 0:
raise ValueError(
"Several directories starting with `cookiecutter-template-` in current working directory. "
"Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
"change your working directory." )
snake_case__ :str = (
Path(UpperCamelCase ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
snake_case__ :Tuple = path_to_transformer_root / "templates" / "adding_a_new_model"
# Execute cookiecutter
if not self._testing:
cookiecutter(str(UpperCamelCase ) )
else:
with open(self._testing_file ,"r" ) as configuration_file:
snake_case__ :str = json.load(UpperCamelCase )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) ,no_input=UpperCamelCase ,extra_context=UpperCamelCase ,)
snake_case__ :List[Any] = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]
# Retrieve configuration
with open(directory + "/configuration.json" ,"r" ) as configuration_file:
snake_case__ :Dict = json.load(UpperCamelCase )
snake_case__ :Optional[Any] = configuration["lowercase_modelname"]
snake_case__ :List[Any] = configuration["generate_tensorflow_pytorch_and_flax"]
os.remove(f'{directory}/configuration.json' )
snake_case__ :Any = "PyTorch" in generate_tensorflow_pytorch_and_flax
snake_case__ :Any = "TensorFlow" in generate_tensorflow_pytorch_and_flax
snake_case__ :Any = "Flax" in generate_tensorflow_pytorch_and_flax
snake_case__ :Dict = f'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'
os.makedirs(UpperCamelCase ,exist_ok=UpperCamelCase )
os.makedirs(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}' ,exist_ok=UpperCamelCase )
# Tests require submodules as they have parent imports
with open(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' ,"w" ):
pass
shutil.move(
f'{directory}/__init__.py' ,f'{model_dir}/__init__.py' ,)
shutil.move(
f'{directory}/configuration_{lowercase_model_name}.py' ,f'{model_dir}/configuration_{lowercase_model_name}.py' ,)
def remove_copy_lines(UpperCamelCase):
    """Rewrite the file at *UpperCamelCase* in place, dropping every line that
    carries the "# Copied from transformers." marker."""
    with open(UpperCamelCase, "r") as f:
        original_lines = f.readlines()
    # Rewrite the same path, keeping only unmarked lines.
    with open(UpperCamelCase, "w") as f:
        f.writelines(
            line for line in original_lines if "# Copied from transformers." not in line
        )
if output_pytorch:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_{lowercase_model_name}.py' ,f'{model_dir}/modeling_{lowercase_model_name}.py' ,)
shutil.move(
f'{directory}/test_modeling_{lowercase_model_name}.py' ,f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' ,)
else:
os.remove(f'{directory}/modeling_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_{lowercase_model_name}.py' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_tf_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_tf_{lowercase_model_name}.py' ,f'{model_dir}/modeling_tf_{lowercase_model_name}.py' ,)
shutil.move(
f'{directory}/test_modeling_tf_{lowercase_model_name}.py' ,f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' ,)
else:
os.remove(f'{directory}/modeling_tf_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_tf_{lowercase_model_name}.py' )
if output_flax:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_flax_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_flax_{lowercase_model_name}.py' ,f'{model_dir}/modeling_flax_{lowercase_model_name}.py' ,)
shutil.move(
f'{directory}/test_modeling_flax_{lowercase_model_name}.py' ,f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' ,)
else:
os.remove(f'{directory}/modeling_flax_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_flax_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/{lowercase_model_name}.md' ,f'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' ,)
shutil.move(
f'{directory}/tokenization_{lowercase_model_name}.py' ,f'{model_dir}/tokenization_{lowercase_model_name}.py' ,)
shutil.move(
f'{directory}/tokenization_fast_{lowercase_model_name}.py' ,f'{model_dir}/tokenization_{lowercase_model_name}_fast.py' ,)
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(original_file, line_to_copy_below, lines_to_copy):
    """Insert *lines_to_copy* into *original_file*, directly below the line
    containing *line_to_copy_below*.

    Fixes: the original signature declared three parameters all named
    ``UpperCamelCase`` (a SyntaxError) and the body referenced the
    now-restored names.

    Raises:
        ValueError: if no line contains *line_to_copy_below*.
    """
    # Create temp file; write there first so a failure never corrupts the original.
    fh, abs_path = mkstemp()
    line_found = False
    with fdopen(fh, "w") as new_file:
        with open(original_file) as old_file:
            for line in old_file:
                new_file.write(line)
                if line_to_copy_below in line:
                    line_found = True
                    for line_to_copy in lines_to_copy:
                        new_file.write(line_to_copy)
    if not line_found:
        raise ValueError(f"Line {line_to_copy_below} was not found in file.")
    # Copy the file permissions from the old file to the new file
    copymode(original_file, abs_path)
    # Remove original file
    remove(original_file)
    # Move new file
    move(abs_path, original_file)
def skip_units(line):
    """Return True when *line* announces a framework section the user opted out of.

    Fixes: the original parameter was never used and the body read an
    undefined name ``line``; the parameter is now named ``line``.
    Reads the enclosing scope's output_pytorch/output_tensorflow/output_flax flags.
    """
    return (
        ("generating PyTorch" in line and not output_pytorch)
        or ("generating TensorFlow" in line and not output_tensorflow)
        or ("generating Flax" in line and not output_flax)
    )
def replace_in_files(path_to_datafile):
    """Parse a ``to_replace_*.py`` data file and apply each recorded snippet.

    Directive grammar (non-directive lines accumulate into the snippet):
      ``# To replace in: "<file>"``  — select the target file,
      ``# Below: "<anchor>"``        — select the anchor line,
      ``# Replace with``             — reset the snippet buffer,
      ``# End.``                      — apply the snippet via ``replace``.
    The data file itself is deleted afterwards.

    Fixes: the original body passed the undefined name ``UpperCamelCase`` to
    ``replace`` and never bound the parsed file/anchor names.
    """
    with open(path_to_datafile) as datafile:
        lines_to_copy = []
        skip_file = False
        skip_snippet = False
        for line in datafile:
            if "# To replace in: " in line and "##" not in line:
                file_to_replace_in = line.split('"')[1]
                skip_file = skip_units(line)
            elif "# Below: " in line and "##" not in line:
                line_to_copy_below = line.split('"')[1]
                skip_snippet = skip_units(line)
            elif "# End." in line and "##" not in line:
                if not skip_file and not skip_snippet:
                    replace(file_to_replace_in, line_to_copy_below, lines_to_copy)
                lines_to_copy = []
            elif "# Replace with" in line and "##" not in line:
                lines_to_copy = []
            elif "##" not in line:
                lines_to_copy.append(line)
    remove(path_to_datafile)
replace_in_files(f'{directory}/to_replace_{lowercase_model_name}.py' )
os.rmdir(UpperCamelCase )
| 57
| 0
|
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
__UpperCAmelCase : Tuple = datasets.logging.get_logger(__name__)
__UpperCAmelCase : Optional[Any] = '''\
@inproceedings{bleurt,
title={BLEURT: Learning Robust Metrics for Text Generation},
author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
booktitle={ACL},
year={2020},
url={https://arxiv.org/abs/2004.04696}
}
'''
__UpperCAmelCase : Dict = '''\
BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)
and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).
See the project\'s README at https://github.com/google-research/bleurt#readme for more information.
'''
__UpperCAmelCase : Union[str, Any] = '''
BLEURT score.
Args:
`predictions` (list of str): prediction/candidate sentences
`references` (list of str): reference sentences
`checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.
Returns:
\'scores\': List of scores.
Examples:
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> bleurt = datasets.load_metric("bleurt")
>>> results = bleurt.compute(predictions=predictions, references=references)
>>> print([round(v, 2) for v in results["scores"]])
[1.03, 1.04]
'''
__UpperCAmelCase : Dict = {
'''bleurt-tiny-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip''',
'''bleurt-tiny-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip''',
'''bleurt-base-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip''',
'''bleurt-base-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip''',
'''bleurt-large-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip''',
'''bleurt-large-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip''',
'''BLEURT-20-D3''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip''',
'''BLEURT-20-D6''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip''',
'''BLEURT-20-D12''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip''',
'''BLEURT-20''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip''',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _snake_case(datasets.Metric):
    """BLEURT metric: scores candidate sentences against references using a
    learnt BLEURT checkpoint (https://github.com/google-research/bleurt).

    Fixes applied: the download method now uses ``elif`` so the "default"
    config no longer falls through to KeyError, and ``_compute`` no longer
    declares two parameters with the same name (a SyntaxError) nor
    references the undefined ``UpperCamelCase_``.
    NOTE(review): all three methods share the mangled name ``lowerCAmelCase_``
    (only the last binding survives) — confirm the intended method names
    (_info/_download_and_prepare/_compute) against the surrounding project.
    """

    def lowerCAmelCase_(self):
        """Declare the metric's metadata and its string input features."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/google-research/bleurt",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/bleurt"],
            reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"],
        )

    def lowerCAmelCase_(self, UpperCamelCase):
        """Download the checkpoint named by ``self.config_name`` via the
        download manager *UpperCamelCase* and build the BLEURT scorer."""
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')."
            )
            checkpoint_name = "bleurt-base-128"
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}"
            )
        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager_path = UpperCamelCase.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))

    def lowerCAmelCase_(self, predictions, references):
        """Score *predictions* against *references*; returns {"scores": [...]}."""
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
| 704
|
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
__UpperCAmelCase : str = logging.get_logger(__name__)
__UpperCAmelCase : List[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
__UpperCAmelCase : List[Any] = {
"vocab_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
},
"merges_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
},
}
__UpperCAmelCase : str = {"allegro/herbert-base-cased": 5_1_4}
__UpperCAmelCase : List[str] = {}
class _snake_case(_A):
    """Fast (Rust-backed) tokenizer for HerBERT.

    Sequences are wrapped BERT-style: ``<s> A </s>`` or ``<s> A </s> B </s>``.

    Fixes applied: every method previously declared several parameters all
    named ``UpperCamelCase`` (and duplicate keyword arguments in the
    ``super().get_special_tokens_mask`` call), which is a SyntaxError; the
    upstream parameter names are restored.
    NOTE(review): the class-attribute assignments below all rebind the single
    mangled name ``_A`` — confirm the intended attribute names
    (vocab_files_names, pretrained_vocab_files_map, ...) against the project.
    """

    _A = VOCAB_FILES_NAMES
    _A = PRETRAINED_VOCAB_FILES_MAP
    _A = PRETRAINED_INIT_CONFIGURATION
    _A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _A = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ) -> None:
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def lowerCAmelCase_(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs: <s> A </s> (and ``B </s>`` when a pair is given)."""
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def lowerCAmelCase_(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        """Return a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0,
                token_ids_1=token_ids_1,
                already_has_special_tokens=already_has_special_tokens,
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def lowerCAmelCase_(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Return token-type ids: 0 for the first segment, 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def lowerCAmelCase_(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """Persist the underlying tokenizer model files; return the written paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 57
| 0
|
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def lowercase_(__snake_case: int) -> bool:
    """Return True when code point *__snake_case* is a CJK ideograph.

    Covers CJK Unified Ideographs, extensions A-F and the compatibility
    blocks, mirroring BERT's Chinese-character test.

    Fixes: the original body referenced an undefined name ``cp``.
    """
    cp = __snake_case
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):
        return True
    return False
def lowercase_(__snake_case: str) -> int:
    """Return 1 when every character of *__snake_case* is a CJK ideograph, else 0.

    Fixes: the original body called undefined names (``_A``,
    ``_is_chinese_char``); the character test is inlined so the function is
    self-contained.
    """

    def _is_cjk(cp: int) -> bool:
        # Same Unicode ranges as BERT's _is_chinese_char helper.
        return (
            0x4E00 <= cp <= 0x9FFF
            or 0x3400 <= cp <= 0x4DBF
            or 0x20000 <= cp <= 0x2A6DF
            or 0x2A700 <= cp <= 0x2B73F
            or 0x2B740 <= cp <= 0x2B81F
            or 0x2B820 <= cp <= 0x2CEAF
            or 0xF900 <= cp <= 0xFAFF
            or 0x2F800 <= cp <= 0x2FA1F
        )

    for char in __snake_case:
        if not _is_cjk(ord(char)):
            return 0
    return 1
def lowercase_(__snake_case: list) -> list:
    """Collect the multi-character, all-CJK tokens from *__snake_case*.

    Returns the distinct matches as a list (order unspecified, set-derived).
    Fixes: the original body called undefined names (``_A``, ``is_chinese``);
    the word test is inlined so the function is self-contained.
    """

    def _all_cjk(word: str) -> bool:
        # True when every char is a CJK ideograph (BERT's ranges).
        for ch in word:
            cp = ord(ch)
            if not (
                0x4E00 <= cp <= 0x9FFF
                or 0x3400 <= cp <= 0x4DBF
                or 0x20000 <= cp <= 0x2A6DF
                or 0x2A700 <= cp <= 0x2B73F
                or 0x2B740 <= cp <= 0x2B81F
                or 0x2B820 <= cp <= 0x2CEAF
                or 0xF900 <= cp <= 0xFAFF
                or 0x2F800 <= cp <= 0x2FA1F
            ):
                return False
        return True

    word_set = set()
    for token in __snake_case:
        if len(token) > 1 and _all_cjk(token):
            word_set.add(token)
    return list(word_set)
def lowercase_(bert_tokens: list, chinese_word_set: set) -> list:
    """Prefix BERT tokens that continue a known Chinese word with "##".

    Scans *bert_tokens* left to right; when the longest span of tokens
    starting at the cursor joins into a word from *chinese_word_set*, every
    token after the first in that span becomes "##"+token.
    NOTE: mutates and returns the input list, as the original did.

    Fixes: the original declared two parameters with the same name (a
    SyntaxError), assigned the ``0, len(...)`` tuple to a single variable
    instead of unpacking into start/end, and called undefined names.
    """
    if not chinese_word_set:
        return bert_tokens

    def _all_cjk(word: str) -> bool:
        # True when every char is a CJK ideograph (BERT's ranges).
        for ch in word:
            cp = ord(ch)
            if not (
                0x4E00 <= cp <= 0x9FFF
                or 0x3400 <= cp <= 0x4DBF
                or 0x20000 <= cp <= 0x2A6DF
                or 0x2A700 <= cp <= 0x2B73F
                or 0x2B740 <= cp <= 0x2B81F
                or 0x2B820 <= cp <= 0x2CEAF
                or 0xF900 <= cp <= 0xFAFF
                or 0x2F800 <= cp <= 0x2FA1F
            ):
                return False
        return True

    max_word_len = max(len(w) for w in chinese_word_set)
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if _all_cjk(bert_word[start]):
            span = min(end - start, max_word_len)
            # Try the longest candidate span first.
            for i in range(span, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def lowercase_(lines: list, ltp_tokenizer, bert_tokenizer) -> list:
    """Compute whole-word-masking reference positions for Chinese text.

    For each input line, segments it with LTP, tokenizes it with BERT, marks
    subword continuations of Chinese words with "##", and records the indices
    of those "##"-continuation characters.

    Fixes: the original declared three parameters with the same name (a
    SyntaxError) and referenced the undefined name ``_A`` throughout.
    NOTE(review): calls to get_chinese_word/add_sub_symbol/_is_chinese_char
    keep the upstream helper names used by the original — confirm they exist
    at module level.
    """
    # Process in batches of 100 lines to keep LTP segmentation fast.
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def lowercase_(__snake_case) -> None:
    """End-to-end driver: read input lines, compute whole-word-masking
    reference ids, and dump them as JSON lines to ``args.save_path``.

    Fixes: the original body read an undefined global ``args`` and the
    undefined name ``_A``; the parameter is now used.
    """
    args = __snake_case
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    # avoid delimiter like '\u2029'
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        out = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(out)
if __name__ == "__main__":
    # Fixes: the parser and parsed args were assigned to a mangled name,
    # leaving `parser` and `args` undefined (NameError).
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
    )
    parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
    parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")
    args = parser.parse_args()
    # NOTE(review): `main` is the expected entry point name — confirm it is
    # defined at module level in the restored file.
    main(args)
| 705
|
def lowercase_(__snake_case: int) -> bool:
    """Lucas-Lehmer primality test for the Mersenne number 2**p - 1.

    Returns True when 2**__snake_case - 1 is prime (meaningful for prime
    exponents p >= 2).

    Fixes: the original body read an undefined name ``p`` instead of its
    parameter.

    Raises:
        ValueError: if the exponent is less than 2.
    """
    p = __snake_case
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True  # 2**2 - 1 == 3 is prime
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
    # Fixes: the original called `lucas_lehmer_test`, which is undefined here;
    # the test function directly above is (mangled-)named `lowercase_`.
    print(lowercase_(7))
    print(lowercase_(11))
| 57
| 0
|
'''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
__UpperCAmelCase : List[str] = logging.get_logger(__name__)
__UpperCAmelCase : Tuple = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
__UpperCAmelCase : List[Any] = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
__UpperCAmelCase : Optional[int] = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
__UpperCAmelCase : Union[str, Any] = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
__UpperCAmelCase : List[str] = OrderedDict(
[
# Model for Image-classsification
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
__UpperCAmelCase : List[str] = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
__UpperCAmelCase : int = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
__UpperCAmelCase : Optional[int] = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
__UpperCAmelCase : Tuple = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
__UpperCAmelCase : Dict = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
__UpperCAmelCase : str = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
__UpperCAmelCase : int = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
__UpperCAmelCase : Any = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
__UpperCAmelCase : List[Any] = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
__UpperCAmelCase : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
__UpperCAmelCase : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
__UpperCAmelCase : Tuple = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
__UpperCAmelCase : int = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
__UpperCAmelCase : List[str] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
__UpperCAmelCase : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
__UpperCAmelCase : Optional[int] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
__UpperCAmelCase : List[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
__UpperCAmelCase : int = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
__UpperCAmelCase : Union[str, Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
__UpperCAmelCase : Any = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
__UpperCAmelCase : Dict = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
__UpperCAmelCase : int = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
__UpperCAmelCase : List[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
# NOTE(review): every class below is (mangled-)named `_snake_case`. The
# original passed the upstream class names (FlaxAutoModel, ...) to
# `auto_class_update`, which are undefined here (NameError); we instead pass
# the class object that was just bound. Confirm the intended public class
# names against the upstream transformers file.
class _snake_case(_BaseAutoModelClass):
    _A = FLAX_MODEL_MAPPING


__UpperCAmelCase : int = auto_class_update(_snake_case)


class _snake_case(_BaseAutoModelClass):
    _A = FLAX_MODEL_FOR_PRETRAINING_MAPPING


__UpperCAmelCase : Union[str, Any] = auto_class_update(_snake_case, head_doc="pretraining")


class _snake_case(_BaseAutoModelClass):
    _A = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


__UpperCAmelCase : List[Any] = auto_class_update(_snake_case, head_doc="causal language modeling")


class _snake_case(_BaseAutoModelClass):
    _A = FLAX_MODEL_FOR_MASKED_LM_MAPPING


__UpperCAmelCase : Union[str, Any] = auto_class_update(_snake_case, head_doc="masked language modeling")


class _snake_case(_BaseAutoModelClass):
    _A = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


__UpperCAmelCase : Union[str, Any] = auto_class_update(
    _snake_case, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class _snake_case(_BaseAutoModelClass):
    _A = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


__UpperCAmelCase : int = auto_class_update(
    _snake_case, head_doc="sequence classification"
)


class _snake_case(_BaseAutoModelClass):
    _A = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


__UpperCAmelCase : List[str] = auto_class_update(_snake_case, head_doc="question answering")


class _snake_case(_BaseAutoModelClass):
    _A = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


__UpperCAmelCase : Union[str, Any] = auto_class_update(
    _snake_case, head_doc="token classification"
)


class _snake_case(_BaseAutoModelClass):
    _A = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


__UpperCAmelCase : Optional[Any] = auto_class_update(_snake_case, head_doc="multiple choice")


class _snake_case(_BaseAutoModelClass):
    _A = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


__UpperCAmelCase : Optional[Any] = auto_class_update(
    _snake_case, head_doc="next sentence prediction"
)


class _snake_case(_BaseAutoModelClass):
    _A = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


__UpperCAmelCase : Optional[Any] = auto_class_update(
    _snake_case, head_doc="image classification"
)


class _snake_case(_BaseAutoModelClass):
    _A = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


__UpperCAmelCase : Any = auto_class_update(_snake_case, head_doc="vision-to-text modeling")


class _snake_case(_BaseAutoModelClass):
    _A = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


__UpperCAmelCase : Any = auto_class_update(
    _snake_case, head_doc="sequence-to-sequence speech-to-text modeling"
)
| 706
|
from typing import Any
def lowercase_(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """Viterbi algorithm: return the most likely state sequence for the
    observations.

    Fixes: the original declared five parameters all named ``__snake_case``
    (a SyntaxError); the upstream parameter names are restored.

    Args:
        observations_space: ordered observation symbols.
        states_space: candidate hidden states.
        initial_probabilities: state -> P(state at t=0).
        transition_probabilities: state -> state -> transition probability.
        emission_probabilities: state -> observation -> emission probability.

    Returns:
        The argmax state sequence, one state per observation.
    """
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
    # The final observation
    final_observation = observations_space[len(observations_space) - 1]
    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
def lowercase_(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Validate all Viterbi inputs: non-emptiness, list shapes, dict shapes.

    Fixes: the original declared five parameters all named ``__snake_case``
    (a SyntaxError); the upstream parameter names are restored.
    """
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)
def lowercase_(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Raise ValueError when any of the five Viterbi inputs is empty/falsy.

    Fixes: the original declared five parameters all named ``__snake_case``
    (a SyntaxError) while the body already used the names restored here.
    """
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")
def lowercase_(observations_space: Any, states_space: Any) -> None:
    """Check that both spaces are lists of strings.

    Fixes: the original declared two parameters with the same name
    (a SyntaxError); the upstream names are restored.
    """
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")
def lowercase_(_object: Any, var_name: str) -> None:
    """Raise ValueError unless *_object* is a list whose items are all strings.

    Fixes: the original declared two parameters with the same name
    (a SyntaxError); the upstream names are restored.
    """
    if not isinstance(_object, list):
        msg = f"{var_name} must be a list"
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg)
def lowercase_(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Check the three probability tables: a float dict and two nested float dicts.

    Fixes: the original declared three parameters with the same name
    (a SyntaxError); the upstream names are restored.
    """
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")
def lowercase_(_object: Any, var_name: str) -> None:
    """Check that *_object* is a dict of str -> dict[str, float].

    Fixes: the original declared two parameters with the same name
    (a SyntaxError); the upstream names are restored.
    """
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)
def lowercase_(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    """Raise ValueError unless *_object* is a dict with str keys and
    *value_type* values; *nested* only adjusts the error message.

    Fixes: the original declared four parameters all named ``__snake_case``
    (a SyntaxError); the upstream names are restored.
    """
    if not isinstance(_object, dict):
        msg = f"{var_name} must be a dict"
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"{var_name} all keys must be strings"
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg)
if __name__ == "__main__":
    # Run this module's embedded doctests when executed directly.
    from doctest import testmod

    testmod()
| 57
| 0
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class _snake_case ( UpperCAmelCase_ , unittest.TestCase ):
    """Fast (tiny-model, CPU) tests for ``ShapEPipeline``.

    NOTE(review): identifiers in this chunk look machine-mangled — the base
    name ``UpperCAmelCase_`` is presumably ``PipelineTesterMixin`` (imported
    above), the repeated ``_A`` class attributes overwrite one another, and
    method bodies assign to ``snake_case__`` but read other undefined names
    (``tokenizer``, ``_lowercase``, ...). Verify against the original file
    before relying on this documentation. Indentation was restored; the code
    tokens are unchanged.
    """

    # Upstream these are distinct names (pipeline_class, params, batch_params,
    # required_optional_params, test_xformers_attention); here each `_A`
    # assignment shadows the previous one.
    _A = ShapEPipeline
    _A = ['prompt']
    _A = ['prompt']
    _A = [
        'num_images_per_prompt',
        'num_inference_steps',
        'generator',
        'latents',
        'guidance_scale',
        'frame_size',
        'output_type',
        'return_dict',
    ]
    _A = False

    @property
    def lowerCAmelCase_ ( self ) -> Dict:
        # Hidden size of the tiny CLIP text embedder.
        return 32

    @property
    def lowerCAmelCase_ ( self ) -> int:
        # Time-embedding input dimension of the tiny prior.
        return 32

    @property
    def lowerCAmelCase_ ( self ) -> List[str]:
        # assumes a sibling property exposes `time_input_dim` — TODO confirm
        return self.time_input_dim * 4

    @property
    def lowerCAmelCase_ ( self ) -> str:
        # Width of the tiny renderer MLP (upstream: `renderer_dim`).
        return 8

    @property
    def lowerCAmelCase_ ( self ) -> Tuple:
        # Tiny CLIP tokenizer fetched from the hub.
        snake_case__ :str = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        return tokenizer

    @property
    def lowerCAmelCase_ ( self ) -> Optional[Any]:
        # Deterministic tiny text encoder.
        torch.manual_seed(0 )
        snake_case__ :Optional[int] = CLIPTextConfig(
            bos_token_id=0 ,eos_token_id=2 ,hidden_size=self.text_embedder_hidden_size ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,)
        return CLIPTextModelWithProjection(_lowercase )

    @property
    def lowerCAmelCase_ ( self ) -> Any:
        # Deterministic tiny prior transformer.
        torch.manual_seed(0 )
        snake_case__ :Union[str, Any] = {
            'num_attention_heads': 2,
            'attention_head_dim': 16,
            'embedding_dim': self.time_input_dim,
            'num_embeddings': 32,
            'embedding_proj_dim': self.text_embedder_hidden_size,
            'time_embed_dim': self.time_embed_dim,
            'num_layers': 1,
            'clip_embed_dim': self.time_input_dim * 2,
            'additional_embeddings': 0,
            'time_embed_act_fn': 'gelu',
            'norm_in_type': 'layer',
            'encoder_hid_proj_type': None,
            'added_emb_type': None,
        }
        snake_case__ :Union[str, Any] = PriorTransformer(**_lowercase )
        return model

    @property
    def lowerCAmelCase_ ( self ) -> Union[str, Any]:
        # Deterministic tiny NeRF-style renderer.
        torch.manual_seed(0 )
        snake_case__ :List[str] = {
            'param_shapes': (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            'd_latent': self.time_input_dim,
            'd_hidden': self.renderer_dim,
            'n_output': 12,
            'background': (
                0.1,
                0.1,
                0.1,
            ),
        }
        snake_case__ :Dict = ShapERenderer(**_lowercase )
        return model

    def lowerCAmelCase_ ( self ) -> Tuple:
        # Assemble the full pipeline component dict from the dummies above.
        snake_case__ :Optional[int] = self.dummy_prior
        snake_case__ :Optional[Any] = self.dummy_text_encoder
        snake_case__ :Optional[int] = self.dummy_tokenizer
        snake_case__ :List[Any] = self.dummy_renderer
        snake_case__ :Any = HeunDiscreteScheduler(
            beta_schedule="exp" ,num_train_timesteps=1_024 ,prediction_type="sample" ,use_karras_sigmas=_lowercase ,clip_sample=_lowercase ,clip_sample_range=1.0 ,)
        snake_case__ :str = {
            'prior': prior,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'renderer': renderer,
            'scheduler': scheduler,
        }
        return components

    # NOTE(review): the two parameters below share one mangled name — a
    # SyntaxError as written; upstream this is `get_dummy_inputs(device, seed=0)`.
    def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase=0 ) -> Tuple:
        if str(_lowercase ).startswith("mps" ):
            # MPS does not support device-bound generators.
            snake_case__ :Union[str, Any] = torch.manual_seed(_lowercase )
        else:
            snake_case__ :int = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
        snake_case__ :Union[str, Any] = {
            'prompt': 'horse',
            'generator': generator,
            'num_inference_steps': 1,
            'frame_size': 32,
            'output_type': 'np',
        }
        return inputs

    def lowerCAmelCase_ ( self ) -> Dict:
        # Smoke test: one inference step on CPU, compare a 3x3 corner slice.
        snake_case__ :Optional[Any] = 'cpu'
        snake_case__ :List[Any] = self.get_dummy_components()
        snake_case__ :Optional[Any] = self.pipeline_class(**_lowercase )
        snake_case__ :List[str] = pipe.to(_lowercase )
        pipe.set_progress_bar_config(disable=_lowercase )
        snake_case__ :Dict = pipe(**self.get_dummy_inputs(_lowercase ) )
        snake_case__ :Dict = output.images[0]
        snake_case__ :Optional[int] = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        snake_case__ :int = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def lowerCAmelCase_ ( self ) -> List[str]:
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2] )

    def lowerCAmelCase_ ( self ) -> Union[str, Any]:
        # Relax tolerances on CPU where determinism differs from GPU.
        snake_case__ :str = torch_device == 'cpu'
        snake_case__ :str = True
        self._test_inference_batch_single_identical(
            batch_size=2 ,test_max_difference=_lowercase ,relax_max_difference=_lowercase ,)

    def lowerCAmelCase_ ( self ) -> Optional[Any]:
        # num_images_per_prompt must multiply the output batch dimension.
        snake_case__ :int = self.get_dummy_components()
        snake_case__ :Any = self.pipeline_class(**_lowercase )
        snake_case__ :Optional[int] = pipe.to(_lowercase )
        pipe.set_progress_bar_config(disable=_lowercase )
        snake_case__ :List[Any] = 1
        snake_case__ :int = 2
        snake_case__ :Union[str, Any] = self.get_dummy_inputs(_lowercase )
        for key in inputs.keys():
            if key in self.batch_params:
                snake_case__ :List[Any] = batch_size * [inputs[key]]
        snake_case__ :Tuple = pipe(**_lowercase ,num_images_per_prompt=_lowercase )[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
    """Slow GPU integration test for the full ``openai/shap-e`` checkpoint.

    NOTE(review): local names are mangled (``snake_case__`` rebound,
    ``_lowercase`` undefined) — artifacts of automated renaming; verify
    against the original file. Indentation restored, tokens unchanged.
    """

    # NOTE(review): calls super().tearDown() although the method name is
    # mangled — upstream this is `tearDown`; confirm.
    def lowerCAmelCase_ ( self ) -> Tuple:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowerCAmelCase_ ( self ) -> Union[str, Any]:
        # Compare full-pipeline output against a stored reference rendering.
        snake_case__ :str = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy" )
        snake_case__ :int = ShapEPipeline.from_pretrained("openai/shap-e" )
        snake_case__ :str = pipe.to(_lowercase )
        pipe.set_progress_bar_config(disable=_lowercase )
        snake_case__ :Optional[Any] = torch.Generator(device=_lowercase ).manual_seed(0 )
        snake_case__ :Union[str, Any] = pipe(
            "a shark" ,generator=_lowercase ,guidance_scale=15.0 ,num_inference_steps=64 ,frame_size=64 ,output_type="np" ,).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(_lowercase ,_lowercase )
| 707
|
def lowercase_(__snake_case: str) -> list:
    """Return every variant of *__snake_case* with one alphabetic character
    upper-cased (one variant per alphabetic position).

    The original body referenced an undefined name ``txt`` (a NameError for
    any non-empty input); it is now bound to the parameter.

    >>> lowercase_("abc")
    ['Abc', 'aBc', 'abC']
    """
    txt = __snake_case
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
    # Execute the doctests embedded in this module.
    __import__("doctest").testmod()
| 57
| 0
|
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    'The RoBERTa Model transformer with early exiting (DeeRoBERTa). ' , __A , )
class _snake_case ( __A ):
    """DeeRoBERTa backbone: a DeeBERT-style model with RoBERTa embeddings.

    NOTE(review): the base/decorator name ``__A`` is mangled — upstream it is
    ``ROBERTA_START_DOCSTRING`` / ``DeeBertModel``; the embedding assignment
    target is also mangled (upstream: ``self.embeddings``). Confirm against
    the original file. Indentation restored, tokens unchanged.
    """

    # Upstream: config_class and base_model_prefix.
    _A = RobertaConfig
    _A = 'roberta'

    def __init__( self ,UpperCamelCase ) -> Optional[int]:
        super().__init__(UpperCamelCase )
        # Swap BERT embeddings for RoBERTa embeddings, then re-init weights.
        snake_case__ :List[str] = RobertaEmbeddings(UpperCamelCase )
        self.init_weights()
@add_start_docstrings(
    'RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ' , __A , )
class _snake_case ( __A ):
    """DeeRoBERTa sequence classifier with per-layer highway exits.

    NOTE(review): local names are mangled — every assignment targets
    ``snake_case__`` while later lines read the upstream names (``outputs``,
    ``logits``, ``loss``, ``highway_losses`` ...), and the forward signature
    repeats one parameter name (a SyntaxError as written). Documentation
    below describes the visible control flow; verify identifier mapping
    against the original file. Indentation restored, tokens unchanged.
    """

    _A = RobertaConfig
    _A = 'roberta'

    def __init__( self ,UpperCamelCase ) -> Union[str, Any]:
        super().__init__(UpperCamelCase )
        # Classifier head: dropout over the pooled output, then a linear layer.
        snake_case__ :Optional[int] = config.num_labels
        snake_case__ :List[Any] = config.num_hidden_layers
        snake_case__ :Any = DeeRobertaModel(UpperCamelCase )
        snake_case__ :str = nn.Dropout(config.hidden_dropout_prob )
        snake_case__ :int = nn.Linear(config.hidden_size ,self.config.num_labels )

    @add_start_docstrings_to_model_forward(UpperCamelCase )
    def lowerCAmelCase_ ( self ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase=-1 ,UpperCamelCase=False ,) -> Dict:
        # Forward pass; a HighwayException signals an early exit at some layer.
        snake_case__ :List[str] = self.num_layers
        try:
            snake_case__ :Tuple = self.roberta(
                UpperCamelCase ,attention_mask=UpperCamelCase ,token_type_ids=UpperCamelCase ,position_ids=UpperCamelCase ,head_mask=UpperCamelCase ,inputs_embeds=UpperCamelCase ,)
            # Pooled output -> dropout -> classifier logits.
            snake_case__ :Optional[Any] = outputs[1]
            snake_case__ :Any = self.dropout(UpperCamelCase )
            snake_case__ :int = self.classifier(UpperCamelCase )
            snake_case__ :int = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            # Early exit: recover the partial outputs and the exit layer index.
            snake_case__ :List[str] = e.message
            snake_case__ :Optional[Any] = e.exit_layer
            snake_case__ :Any = outputs[0]
        if not self.training:
            # Track entropy of the final logits for exit analysis at inference.
            snake_case__ :List[str] = entropy(UpperCamelCase )
            snake_case__ :int = []
            snake_case__ :Optional[Any] = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                snake_case__ :Dict = MSELoss()
                snake_case__ :Any = loss_fct(logits.view(-1 ) ,labels.view(-1 ) )
            else:
                snake_case__ :str = CrossEntropyLoss()
                snake_case__ :Any = loss_fct(logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
            # work with highway exits
            snake_case__ :List[Any] = []
            for highway_exit in outputs[-1]:
                # Each highway exit carries (logits, ..., entropy).
                snake_case__ :Any = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(UpperCamelCase )
                    highway_entropy.append(highway_exit[2] )
                if self.num_labels == 1:
                    # We are doing regression
                    snake_case__ :Union[str, Any] = MSELoss()
                    snake_case__ :Optional[Any] = loss_fct(highway_logits.view(-1 ) ,labels.view(-1 ) )
                else:
                    snake_case__ :Tuple = CrossEntropyLoss()
                    snake_case__ :List[Any] = loss_fct(highway_logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
                highway_losses.append(UpperCamelCase )
            if train_highway:
                snake_case__ :int = (sum(highway_losses[:-1] ),) + outputs
                # exclude the final highway, of course
            else:
                snake_case__ :Any = (loss,) + outputs
        if not self.training:
            # Append entropy bookkeeping and, if requested, substitute the
            # logits of a specific highway layer.
            snake_case__ :Union[str, Any] = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                snake_case__ :Tuple = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer
        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
| 708
|
def lowercase_(__snake_case: int = 1000) -> int:
    """Return the sum of all multiples of 3 or 5 strictly below *__snake_case*.

    Project Euler problem 1. The original body referenced undefined names
    (``a``, ``n``, ``result`` — the assignments all targeted a mangled name)
    and carried an unreachable ``elif a % 15 == 0`` branch: any multiple of
    15 is already a multiple of 3, so the first condition always wins.

    >>> lowercase_(10)
    23
    """
    total = 0
    for candidate in range(3, __snake_case):
        # A single disjunction counts multiples of 15 exactly once.
        if candidate % 3 == 0 or candidate % 5 == 0:
            total += candidate
    return total
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined in this chunk — the function
    # above was renamed `lowercase_` by the mangling; confirm the intended
    # entry point against the original file.
    print(F'''{solution() = }''')
| 57
| 0
|
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
__UpperCAmelCase : Dict = logging.get_logger(__name__)
class _snake_case ( _A ):
    """Deprecated shim that forwards to its image-processor base class.

    NOTE(review): the base name ``_A`` is mangled — presumably the
    ``LayoutLMvaImageProcessor`` imported above; confirm.
    """

    def __init__( self ,*args ,**kwargs ) -> None:
        # The original signature reused one mangled name for both *args and
        # **kwargs (a SyntaxError) and passed an undefined name to
        # warnings.warn; restored to the upstream deprecation pattern, which
        # warns with FutureWarning and defers construction to the base class.
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead." ,FutureWarning ,)
        super().__init__(*args ,**kwargs )
| 709
|
import os
import sys
import unittest
__UpperCAmelCase : str = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__UpperCAmelCase : Tuple = os.path.join(git_repo_path, "src", "diffusers")
class _snake_case ( unittest.TestCase ):
    """Unit tests for the `check_dummies` utility (backend detection and
    dummy-object generation).

    NOTE(review): local names are mangled — results are assigned to
    ``snake_case__`` but asserted via ``UpperCamelCase``/``objects``, which
    are undefined as written; verify the identifier mapping against the
    original file. Indentation restored, tokens unchanged.
    """

    def lowerCAmelCase_ ( self ) -> Union[str, Any]:
        # find_backend maps an `is_xxx_available()` guard line to a backend name.
        snake_case__ :Tuple = find_backend(" if not is_torch_available():" )
        self.assertEqual(UpperCamelCase ,"torch" )
        # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")
        snake_case__ :Tuple = find_backend(" if not (is_torch_available() and is_transformers_available()):" )
        self.assertEqual(UpperCamelCase ,"torch_and_transformers" )
        # double_backend_with_underscore = find_backend(
        #     " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        snake_case__ :str = find_backend(
            " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):" )
        self.assertEqual(UpperCamelCase ,"torch_and_transformers_and_onnx" )

    def lowerCAmelCase_ ( self ) -> str:
        # read_init should expose the per-backend object registry.
        snake_case__ :int = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch" ,UpperCamelCase )
        self.assertIn("torch_and_transformers" ,UpperCamelCase )
        self.assertIn("flax_and_transformers" ,UpperCamelCase )
        self.assertIn("torch_and_transformers_and_onnx" ,UpperCamelCase )
        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel" ,objects["torch"] )
        self.assertIn("FlaxUNet2DConditionModel" ,objects["flax"] )
        self.assertIn("StableDiffusionPipeline" ,objects["torch_and_transformers"] )
        self.assertIn("FlaxStableDiffusionPipeline" ,objects["flax_and_transformers"] )
        self.assertIn("LMSDiscreteScheduler" ,objects["torch_and_scipy"] )
        self.assertIn("OnnxStableDiffusionPipeline" ,objects["torch_and_transformers_and_onnx"] )

    def lowerCAmelCase_ ( self ) -> Any:
        # create_dummy_object emits placeholder constants/functions/classes.
        snake_case__ :Union[str, Any] = create_dummy_object("CONSTANT" ,"'torch'" )
        self.assertEqual(UpperCamelCase ,"\nCONSTANT = None\n" )
        snake_case__ :Optional[Any] = create_dummy_object("function" ,"'torch'" )
        self.assertEqual(
            UpperCamelCase ,"\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n" )
        snake_case__ :str = "\nclass FakeClass(metaclass=DummyObject):\n    _backends = 'torch'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, 'torch')\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, 'torch')\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, 'torch')\n"
        snake_case__ :List[str] = create_dummy_object("FakeClass" ,"'torch'" )
        self.assertEqual(UpperCamelCase ,UpperCamelCase )

    def lowerCAmelCase_ ( self ) -> List[Any]:
        # create_dummy_files renders a whole dummy module per backend.
        snake_case__ :Tuple = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = [\"torch\"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, [\"torch\"])\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, [\"torch\"])\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, [\"torch\"])\n"
        snake_case__ :int = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} )
        self.assertEqual(dummy_files["torch"] ,UpperCamelCase )
| 57
| 0
|
def lowercase_(a: int, b: int) -> str:
    """Return the bitwise OR of two non-negative integers as a binary string.

    The original signature declared one parameter name twice (a SyntaxError)
    and the body referenced undefined names; restored here.

    Args:
        a: first non-negative operand.
        b: second non-negative operand.

    Returns:
        The OR of *a* and *b*, formatted like ``bin()`` output ("0b...").

    Raises:
        ValueError: if either operand is negative.

    >>> lowercase_(25, 32)
    '0b111001'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    # A position is 1 in the result when either zero-padded operand has a 1.
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 710
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# Lazy-import wiring for the Bartpho tokenizer: the concrete module is only
# imported when sentencepiece is installed, or eagerly for static type
# checking. Indentation restored; code tokens unchanged.
__UpperCAmelCase : Tuple = {}

try:
    # Probe for the optional sentencepiece dependency.
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Dependency missing: leave the import structure empty.
    pass
else:
    # NOTE(review): upstream this list is stored into the import-structure
    # dict under a "tokenization_bartpho" key; here it rebinds the same
    # mangled module-level name — confirm against the original file.
    __UpperCAmelCase : List[Any] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    import sys

    # NOTE(review): `_import_structure` is undefined in this chunk (the dict
    # at the top was renamed by the mangling) — confirm against the original.
    __UpperCAmelCase : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 57
| 0
|
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowercase_(dataset, expected_features) -> None:
    """Shared assertions for a `Dataset` built from the 4-line text fixture.

    Restored from a mangled body: the original declared duplicate parameter
    names (a SyntaxError) and referenced the undefined ``_lowerCAmelCase``.
    """
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def lowercase_ ( __snake_case : Optional[int] , __snake_case : int , __snake_case : Optional[int] ) -> Any:
'''simple docstring'''
snake_case__ :Dict = tmp_path / "cache"
snake_case__ :List[str] = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
snake_case__ :str = TextDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase , keep_in_memory=_lowerCAmelCase ).read()
_check_text_dataset(_lowerCAmelCase , _lowerCAmelCase )
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def lowercase_(features, text_path, tmp_path) -> None:
    """An explicit `features` mapping must override the default string dtype.

    Restored from mangled duplicate parameter names (a SyntaxError) and
    undefined ``_lowerCAmelCase`` references.
    """
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    # Convert the plain dtype mapping into a datasets `Features` object.
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def lowercase_ ( __snake_case : Dict , __snake_case : str , __snake_case : int ) -> Tuple:
'''simple docstring'''
snake_case__ :Union[str, Any] = tmp_path / "cache"
snake_case__ :List[Any] = {"text": "string"}
snake_case__ :Dict = TextDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase , split=_lowerCAmelCase ).read()
_check_text_dataset(_lowerCAmelCase , _lowerCAmelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def lowercase_ ( __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : List[Any] ) -> Optional[int]:
'''simple docstring'''
if issubclass(_lowerCAmelCase , _lowerCAmelCase ):
snake_case__ :Dict = text_path
elif issubclass(_lowerCAmelCase , _lowerCAmelCase ):
snake_case__ :int = [text_path]
snake_case__ :List[Any] = tmp_path / "cache"
snake_case__ :Optional[int] = {"text": "string"}
snake_case__ :Union[str, Any] = TextDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
_check_text_dataset(_lowerCAmelCase , _lowerCAmelCase )
def lowercase_(dataset_dict, expected_features, splits=("train",)) -> None:
    """Shared assertions for a `DatasetDict` built from the text fixture.

    Restored from mangled duplicate parameter names (a SyntaxError) and
    undefined ``_lowerCAmelCase`` references.
    """
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def lowercase_ ( __snake_case : Tuple , __snake_case : Optional[Any] , __snake_case : Any ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ :Optional[int] = tmp_path / "cache"
snake_case__ :Dict = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
snake_case__ :Dict = TextDatasetReader({"train": text_path} , cache_dir=_lowerCAmelCase , keep_in_memory=_lowerCAmelCase ).read()
_check_text_datasetdict(_lowerCAmelCase , _lowerCAmelCase )
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def lowercase_(features, text_path, tmp_path) -> None:
    """An explicit `features` mapping must override the default dtype when
    reading into a `DatasetDict`.

    Restored from mangled duplicate parameter names (a SyntaxError).
    """
    cache_dir = tmp_path / "cache"
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def lowercase_ ( __snake_case : List[Any] , __snake_case : Any , __snake_case : Union[str, Any] ) -> Dict:
'''simple docstring'''
if split:
snake_case__ :Tuple = {split: text_path}
else:
snake_case__ :List[str] = "train"
snake_case__ :List[str] = {"train": text_path, "test": text_path}
snake_case__ :str = tmp_path / "cache"
snake_case__ :int = {"text": "string"}
snake_case__ :str = TextDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
_check_text_datasetdict(_lowerCAmelCase , _lowerCAmelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
| 711
|
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _snake_case ( unittest.TestCase ):
    """Tokenizer download/caching behaviour tests (offline resilience and
    deprecated URL loading).

    NOTE(review): local names are mangled — assignments target
    ``snake_case__`` while later lines read ``UpperCamelCase``/``tokenizer``,
    undefined as written; verify against the original file. Indentation
    restored, tokens unchanged.
    """

    def lowerCAmelCase_ ( self ) -> List[Any]:
        # A mock response for an HTTP head request to emulate server down
        snake_case__ :Tuple = mock.Mock()
        snake_case__ :List[str] = 500
        snake_case__ :Any = {}
        snake_case__ :Union[str, Any] = HTTPError
        snake_case__ :Tuple = {}
        # Download this model to make sure it's in the cache.
        snake_case__ :Any = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request" ,return_value=UpperCamelCase ) as mock_head:
            snake_case__ :Dict = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
            # This check we did call the fake head request
            mock_head.assert_called()

    @require_tokenizers
    def lowerCAmelCase_ ( self ) -> Dict:
        # A mock response for an HTTP head request to emulate server down
        snake_case__ :Union[str, Any] = mock.Mock()
        snake_case__ :int = 500
        snake_case__ :Any = {}
        snake_case__ :Dict = HTTPError
        snake_case__ :List[Any] = {}
        # Download this model to make sure it's in the cache.
        snake_case__ :Optional[int] = GPTaTokenizerFast.from_pretrained("gpt2" )
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request" ,return_value=UpperCamelCase ) as mock_head:
            snake_case__ :Any = GPTaTokenizerFast.from_pretrained("gpt2" )
            # This check we did call the fake head request
            mock_head.assert_called()

    def lowerCAmelCase_ ( self ) -> int:
        # This test is for deprecated behavior and can be removed in v5
        try:
            snake_case__ :Union[str, Any] = tempfile.mktemp()
            with open(UpperCamelCase ,"wb" ) as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" ,UpperCamelCase )
            snake_case__ :Tuple = AlbertTokenizer.from_pretrained(UpperCamelCase )
        finally:
            os.remove(UpperCamelCase )
        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile("tokenizer.json" ):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open("tokenizer.json" ,"wb" ) as f:
                http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json" ,UpperCamelCase )
            snake_case__ :Dict = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size ,1_000 )
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove("tokenizer.json" )

    def lowerCAmelCase_ ( self ) -> Union[str, Any]:
        # This test is for deprecated behavior and can be removed in v5
        snake_case__ :Union[str, Any] = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" )
@is_staging_test
class _snake_case ( unittest.TestCase ):
    """Hub push/pull round-trip tests for tokenizers (run against the
    staging endpoint).

    NOTE(review): local names are mangled — assignments target
    ``snake_case__`` while later lines read ``tokenizer``/``new_tokenizer``,
    undefined as written; the two classmethods are presumably
    ``setUpClass``/``tearDownClass``. Verify against the original file.
    Indentation restored, tokens unchanged.
    """

    # Minimal WordPiece vocabulary used to build throwaway tokenizers.
    _A = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']

    @classmethod
    def lowerCAmelCase_ ( cls ) -> Optional[int]:
        # Authenticate against the staging hub once for the whole class.
        snake_case__ :List[str] = TOKEN
        HfFolder.save_token(UpperCamelCase )

    @classmethod
    def lowerCAmelCase_ ( cls ) -> Union[str, Any]:
        # Best-effort cleanup of every repo the tests may have created.
        try:
            delete_repo(token=cls._token ,repo_id="test-tokenizer" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token ,repo_id="valid_org/test-tokenizer-org" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token ,repo_id="test-dynamic-tokenizer" )
        except HTTPError:
            pass

    def lowerCAmelCase_ ( self ) -> Optional[Any]:
        # Push to a user repo via push_to_hub, then via save_pretrained.
        with tempfile.TemporaryDirectory() as tmp_dir:
            snake_case__ :List[str] = os.path.join(UpperCamelCase ,"vocab.txt" )
            with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
            snake_case__ :str = BertTokenizer(UpperCamelCase )
        tokenizer.push_to_hub("test-tokenizer" ,use_auth_token=self._token )
        snake_case__ :Dict = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
        self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
        # Reset repo
        delete_repo(token=self._token ,repo_id="test-tokenizer" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(UpperCamelCase ,repo_id="test-tokenizer" ,push_to_hub=UpperCamelCase ,use_auth_token=self._token )
        snake_case__ :List[str] = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
        self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )

    def lowerCAmelCase_ ( self ) -> Optional[int]:
        # Same round-trip, but targeting an organization-owned repo.
        with tempfile.TemporaryDirectory() as tmp_dir:
            snake_case__ :List[Any] = os.path.join(UpperCamelCase ,"vocab.txt" )
            with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
            snake_case__ :Any = BertTokenizer(UpperCamelCase )
        tokenizer.push_to_hub("valid_org/test-tokenizer-org" ,use_auth_token=self._token )
        snake_case__ :Any = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
        self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
        # Reset repo
        delete_repo(token=self._token ,repo_id="valid_org/test-tokenizer-org" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                UpperCamelCase ,repo_id="valid_org/test-tokenizer-org" ,push_to_hub=UpperCamelCase ,use_auth_token=self._token )
        snake_case__ :Union[str, Any] = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
        self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )

    @require_tokenizers
    def lowerCAmelCase_ ( self ) -> Any:
        # Dynamic (trust_remote_code) tokenizers: slow-only, then fast+slow.
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            snake_case__ :str = os.path.join(UpperCamelCase ,"vocab.txt" )
            with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
            snake_case__ :Optional[int] = CustomTokenizer(UpperCamelCase )
        # No fast custom tokenizer
        tokenizer.push_to_hub("test-dynamic-tokenizer" ,use_auth_token=self._token )
        snake_case__ :Union[str, Any] = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=UpperCamelCase )
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__ ,"CustomTokenizer" )
        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            snake_case__ :int = os.path.join(UpperCamelCase ,"vocab.txt" )
            with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
            snake_case__ :Tuple = BertTokenizerFast.from_pretrained(UpperCamelCase )
            bert_tokenizer.save_pretrained(UpperCamelCase )
            snake_case__ :List[Any] = CustomTokenizerFast.from_pretrained(UpperCamelCase )
        tokenizer.push_to_hub("test-dynamic-tokenizer" ,use_auth_token=self._token )
        snake_case__ :List[Any] = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=UpperCamelCase )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__ ,"CustomTokenizerFast" )
        snake_case__ :List[str] = AutoTokenizer.from_pretrained(
            f'{USER}/test-dynamic-tokenizer' ,use_fast=UpperCamelCase ,trust_remote_code=UpperCamelCase )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__ ,"CustomTokenizer" )
class _snake_case ( unittest.TestCase ):
    """Unit tests for the tokenizer ``Trie`` helper (longest-match text
    splitting over added tokens).

    NOTE(review): local names are mangled — each test assigns the Trie to
    ``snake_case__`` but then calls methods on ``trie``, undefined as
    written; verify against the original file. Indentation restored,
    tokens unchanged.
    """

    def lowerCAmelCase_ ( self ) -> List[Any]:
        # Adding a word builds a nested-dict path terminated by a {"": 1} leaf.
        snake_case__ :int = Trie()
        trie.add("Hello 友達" )
        self.assertEqual(trie.data ,{"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}} )
        trie.add("Hello" )
        trie.data
        self.assertEqual(trie.data ,{"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}} )

    def lowerCAmelCase_ ( self ) -> int:
        # An empty trie leaves the input unsplit; added tokens become cut points.
        snake_case__ :List[str] = Trie()
        self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) ,["[CLS] This is a extra_id_100"] )
        trie.add("[CLS]" )
        trie.add("extra_id_1" )
        trie.add("extra_id_100" )
        self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) ,["[CLS]", " This is a ", "extra_id_100"] )

    def lowerCAmelCase_ ( self ) -> str:
        # Single-character token splits at every occurrence.
        snake_case__ :Optional[Any] = Trie()
        trie.add("A" )
        self.assertEqual(trie.split("ABC" ) ,["A", "BC"] )
        self.assertEqual(trie.split("BCA" ) ,["BC", "A"] )

    def lowerCAmelCase_ ( self ) -> Dict:
        # Longest matching token wins over a partial suffix token.
        snake_case__ :Any = Trie()
        trie.add("TOKEN]" )
        trie.add("[SPECIAL_TOKEN]" )
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) ,["This is something ", "[SPECIAL_TOKEN]"] )

    def lowerCAmelCase_ ( self ) -> Tuple:
        # Overlapping single-char tokens must not break the long match.
        snake_case__ :List[Any] = Trie()
        trie.add("A" )
        trie.add("P" )
        trie.add("[SPECIAL_TOKEN]" )
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) ,["This is something ", "[SPECIAL_TOKEN]"] )

    def lowerCAmelCase_ ( self ) -> Tuple:
        # Prefix token "AB" beats the later single-char match "B".
        snake_case__ :str = Trie()
        trie.add("AB" )
        trie.add("B" )
        trie.add("C" )
        self.assertEqual(trie.split("ABC" ) ,["AB", "C"] )

    def lowerCAmelCase_ ( self ) -> Union[str, Any]:
        # A longer match consumes characters that shorter tokens also cover.
        snake_case__ :Dict = Trie()
        trie.add("ABC" )
        trie.add("B" )
        trie.add("CD" )
        self.assertEqual(trie.split("ABCD" ) ,["ABC", "D"] )

    def lowerCAmelCase_ ( self ) -> int:
        # Even if the offsets are wrong, we necessarily output correct string
        # parts.
        snake_case__ :Optional[int] = Trie()
        snake_case__ :Union[str, Any] = trie.cut_text("ABC" ,[0, 0, 2, 1, 2, 3] )
        self.assertEqual(UpperCamelCase ,["AB", "C"] )
| 57
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()  # make runs deterministic so the pixel-slice assertions below are reproducible
class _snake_case ( UpperCamelCase_ , unittest.TestCase ):
    """Fast tests for ``UnCLIPImageVariationPipeline`` built from tiny dummy models.

    NOTE(review): this block shows mechanical renaming damage — locals are all
    ``snake_case__``, parameters are ``UpperCamelCase`` while bodies reference
    ``_a``, every class attribute rebinds ``_A``, and every method is named
    ``lowerCAmelCase_`` (later defs shadow earlier ones).  Code is left
    byte-identical; only comments were added.
    """

    # Pipeline under test plus the parameter sets exercised by the shared mixin.
    # NOTE(review): each assignment rebinds the same name ``_A`` — only the last
    # survives; these were presumably distinct attributes before renaming.
    _A = UnCLIPImageVariationPipeline
    _A = IMAGE_VARIATION_PARAMS - {"""height""", """width""", """guidance_scale"""}
    _A = IMAGE_VARIATION_BATCH_PARAMS
    _A = [
        """generator""",
        """return_dict""",
        """decoder_num_inference_steps""",
        """super_res_num_inference_steps""",
    ]
    _A = False

    @property
    def lowerCAmelCase_ ( self ) -> int:
        # presumably one of the tiny-model size properties (e.g.
        # text_embedder_hidden_size) before renaming — TODO confirm
        return 32

    @property
    def lowerCAmelCase_ ( self ) -> List[Any]:
        return 32

    @property
    def lowerCAmelCase_ ( self ) -> List[str]:
        return self.time_input_dim

    @property
    def lowerCAmelCase_ ( self ) -> Dict:
        return self.time_input_dim * 4

    @property
    def lowerCAmelCase_ ( self ) -> Any:
        return 100

    @property
    def lowerCAmelCase_ ( self ) -> str:
        # NOTE(review): the assignment binds ``snake_case__`` but the return
        # references ``tokenizer`` — renaming damage.
        snake_case__ :str = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        return tokenizer

    @property
    def lowerCAmelCase_ ( self ) -> List[str]:
        torch.manual_seed(0 )
        snake_case__ :Any = CLIPTextConfig(
            bos_token_id=0 ,eos_token_id=2 ,hidden_size=self.text_embedder_hidden_size ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,)
        return CLIPTextModelWithProjection(_a )

    @property
    def lowerCAmelCase_ ( self ) -> Dict:
        torch.manual_seed(0 )
        snake_case__ :str = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size ,projection_dim=self.text_embedder_hidden_size ,num_hidden_layers=5 ,num_attention_heads=4 ,image_size=32 ,intermediate_size=37 ,patch_size=1 ,)
        return CLIPVisionModelWithProjection(_a )

    @property
    def lowerCAmelCase_ ( self ) -> Optional[int]:
        torch.manual_seed(0 )
        snake_case__ :Optional[int] = {
            """clip_embeddings_dim""": self.text_embedder_hidden_size,
            """time_embed_dim""": self.time_embed_dim,
            """cross_attention_dim""": self.cross_attention_dim,
        }
        snake_case__ :List[Any] = UnCLIPTextProjModel(**_a )
        return model

    @property
    def lowerCAmelCase_ ( self ) -> Optional[Any]:
        torch.manual_seed(0 )
        snake_case__ :Tuple = {
            """sample_size""": 32,
            # RGB in channels
            """in_channels""": 3,
            # Out channels is double in channels because predicts mean and variance
            """out_channels""": 6,
            """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
            """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
            """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
            """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
            """layers_per_block""": 1,
            """cross_attention_dim""": self.cross_attention_dim,
            """attention_head_dim""": 4,
            """resnet_time_scale_shift""": """scale_shift""",
            """class_embed_type""": """identity""",
        }
        snake_case__ :List[str] = UNetaDConditionModel(**_a )
        return model

    @property
    def lowerCAmelCase_ ( self ) -> List[str]:
        # kwargs for the super-resolution UNet2DModel used twice below
        return {
            "sample_size": 64,
            "layers_per_block": 1,
            "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
            "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "in_channels": 6,
            "out_channels": 3,
        }

    @property
    def lowerCAmelCase_ ( self ) -> Union[str, Any]:
        torch.manual_seed(0 )
        snake_case__ :List[Any] = UNetaDModel(**self.dummy_super_res_kwargs )
        return model

    @property
    def lowerCAmelCase_ ( self ) -> Union[str, Any]:
        # seeded differently from the model above — presumably the
        # "super_res_last" variant; TODO confirm
        torch.manual_seed(1 )
        snake_case__ :List[Any] = UNetaDModel(**self.dummy_super_res_kwargs )
        return model

    def lowerCAmelCase_ ( self ) -> Dict:
        # Assembles all dummy components into the kwargs dict the pipeline
        # constructor expects.
        snake_case__ :str = self.dummy_decoder
        snake_case__ :Dict = self.dummy_text_proj
        snake_case__ :Any = self.dummy_text_encoder
        snake_case__ :Optional[Any] = self.dummy_tokenizer
        snake_case__ :Any = self.dummy_super_res_first
        snake_case__ :Dict = self.dummy_super_res_last
        snake_case__ :int = UnCLIPScheduler(
            variance_type="learned_range" ,prediction_type="epsilon" ,num_train_timesteps=1_000 ,)
        snake_case__ :Dict = UnCLIPScheduler(
            variance_type="fixed_small_log" ,prediction_type="epsilon" ,num_train_timesteps=1_000 ,)
        snake_case__ :List[Any] = CLIPImageProcessor(crop_size=32 ,size=32 )
        snake_case__ :Optional[int] = self.dummy_image_encoder
        return {
            "decoder": decoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_proj": text_proj,
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder,
            "super_res_first": super_res_first,
            "super_res_last": super_res_last,
            "decoder_scheduler": decoder_scheduler,
            "super_res_scheduler": super_res_scheduler,
        }

    def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase=0 ,UpperCamelCase=True ) -> Dict:
        # NOTE(review): duplicate parameter names — a SyntaxError as written;
        # presumably (device, seed=0, pil_image=True) before renaming.
        snake_case__ :Dict = floats_tensor((1, 3, 32, 32) ,rng=random.Random(_a ) ).to(_a )
        if str(_a ).startswith("mps" ):
            snake_case__ :Optional[Any] = torch.manual_seed(_a )
        else:
            snake_case__ :Tuple = torch.Generator(device=_a ).manual_seed(_a )
        if pil_image:
            snake_case__ :Union[str, Any] = input_image * 0.5 + 0.5
            snake_case__ :int = input_image.clamp(0 ,1 )
            snake_case__ :int = input_image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
            snake_case__ :str = DiffusionPipeline.numpy_to_pil(_a )[0]
        return {
            "image": input_image,
            "generator": generator,
            "decoder_num_inference_steps": 2,
            "super_res_num_inference_steps": 2,
            "output_type": "np",
        }

    def lowerCAmelCase_ ( self ) -> Union[str, Any]:
        # End-to-end run with a tensor image; compares a fixed output slice.
        snake_case__ :Tuple = """cpu"""
        snake_case__ :str = self.get_dummy_components()
        snake_case__ :Optional[int] = self.pipeline_class(**_a )
        snake_case__ :str = pipe.to(_a )
        pipe.set_progress_bar_config(disable=_a )
        snake_case__ :Dict = self.get_dummy_inputs(_a ,pil_image=_a )
        snake_case__ :Optional[int] = pipe(**_a )
        snake_case__ :List[Any] = output.images
        snake_case__ :int = self.get_dummy_inputs(_a ,pil_image=_a )
        snake_case__ :Union[str, Any] = pipe(
            **_a ,return_dict=_a ,)[0]
        snake_case__ :Optional[int] = image[0, -3:, -3:, -1]
        snake_case__ :Optional[int] = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        snake_case__ :str = np.array(
            [
                0.9997,
                0.0002,
                0.9997,
                0.9997,
                0.9969,
                0.0023,
                0.9997,
                0.9969,
                0.9970,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2

    def lowerCAmelCase_ ( self ) -> Tuple:
        # Same check with a different expected slice (presumably the PIL-image
        # input path — TODO confirm).
        snake_case__ :Optional[int] = """cpu"""
        snake_case__ :List[Any] = self.get_dummy_components()
        snake_case__ :Optional[Any] = self.pipeline_class(**_a )
        snake_case__ :Optional[Any] = pipe.to(_a )
        pipe.set_progress_bar_config(disable=_a )
        snake_case__ :int = self.get_dummy_inputs(_a ,pil_image=_a )
        snake_case__ :str = pipe(**_a )
        snake_case__ :List[str] = output.images
        snake_case__ :Union[str, Any] = self.get_dummy_inputs(_a ,pil_image=_a )
        snake_case__ :Any = pipe(
            **_a ,return_dict=_a ,)[0]
        snake_case__ :List[Any] = image[0, -3:, -3:, -1]
        snake_case__ :List[Any] = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        snake_case__ :Any = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2

    def lowerCAmelCase_ ( self ) -> int:
        # Batched variant: two copies of the same input image.
        snake_case__ :Dict = """cpu"""
        snake_case__ :Dict = self.get_dummy_components()
        snake_case__ :str = self.pipeline_class(**_a )
        snake_case__ :Dict = pipe.to(_a )
        pipe.set_progress_bar_config(disable=_a )
        snake_case__ :Union[str, Any] = self.get_dummy_inputs(_a ,pil_image=_a )
        snake_case__ :int = [
            pipeline_inputs["""image"""],
            pipeline_inputs["""image"""],
        ]
        snake_case__ :int = pipe(**_a )
        snake_case__ :List[Any] = output.images
        snake_case__ :List[str] = self.get_dummy_inputs(_a ,pil_image=_a )
        snake_case__ :List[str] = [
            tuple_pipeline_inputs["""image"""],
            tuple_pipeline_inputs["""image"""],
        ]
        snake_case__ :int = pipe(
            **_a ,return_dict=_a ,)[0]
        snake_case__ :List[Any] = image[0, -3:, -3:, -1]
        snake_case__ :Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (2, 64, 64, 3)
        snake_case__ :Union[str, Any] = np.array(
            [
                0.9997,
                0.9989,
                0.0008,
                0.0021,
                0.9960,
                0.0018,
                0.0014,
                0.0002,
                0.9933,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2

    def lowerCAmelCase_ ( self ) -> Optional[int]:
        # Passing pre-computed latents / image embeddings must give the same
        # result as passing the raw image.
        snake_case__ :List[Any] = torch.device("cpu" )
        class _snake_case :
            # NOTE(review): presumably ``init_noise_sigma = 1`` before
            # renaming (a stand-in scheduler for prepare_latents) — TODO confirm
            _A = 1
        snake_case__ :str = self.get_dummy_components()
        snake_case__ :str = self.pipeline_class(**_a )
        snake_case__ :Optional[int] = pipe.to(_a )
        pipe.set_progress_bar_config(disable=_a )
        snake_case__ :Union[str, Any] = torch.Generator(device=_a ).manual_seed(0 )
        snake_case__ :List[Any] = pipe.decoder.dtype
        snake_case__ :List[Any] = 1
        snake_case__ :List[Any] = (
            batch_size,
            pipe.decoder.config.in_channels,
            pipe.decoder.config.sample_size,
            pipe.decoder.config.sample_size,
        )
        snake_case__ :Any = pipe.prepare_latents(
            _a ,dtype=_a ,device=_a ,generator=_a ,latents=_a ,scheduler=DummyScheduler() )
        snake_case__ :Tuple = (
            batch_size,
            pipe.super_res_first.config.in_channels // 2,
            pipe.super_res_first.config.sample_size,
            pipe.super_res_first.config.sample_size,
        )
        snake_case__ :List[Any] = pipe.prepare_latents(
            _a ,dtype=_a ,device=_a ,generator=_a ,latents=_a ,scheduler=DummyScheduler() )
        snake_case__ :int = self.get_dummy_inputs(_a ,pil_image=_a )
        snake_case__ :List[Any] = pipe(
            **_a ,decoder_latents=_a ,super_res_latents=_a ).images
        snake_case__ :Tuple = self.get_dummy_inputs(_a ,pil_image=_a )
        # Don't pass image, instead pass embedding
        snake_case__ :Dict = pipeline_inputs.pop("image" )
        snake_case__ :Union[str, Any] = pipe.image_encoder(_a ).image_embeds
        snake_case__ :Optional[int] = pipe(
            **_a ,decoder_latents=_a ,super_res_latents=_a ,image_embeddings=_a ,).images
        # make sure passing text embeddings manually is identical
        assert np.abs(img_out_a - img_out_a ).max() < 1E-4

    @skip_mps
    def lowerCAmelCase_ ( self ) -> Optional[Any]:
        snake_case__ :List[str] = torch_device == """cpu"""
        # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
        snake_case__ :Tuple = 1E-2
        self._test_attention_slicing_forward_pass(
            test_max_difference=_a ,expected_max_diff=_a )

    @skip_mps
    def lowerCAmelCase_ ( self ) -> Union[str, Any]:
        snake_case__ :List[str] = torch_device == """cpu"""
        snake_case__ :List[str] = True
        snake_case__ :List[str] = [
            """decoder_num_inference_steps""",
            """super_res_num_inference_steps""",
        ]
        self._test_inference_batch_single_identical(
            test_max_difference=_a ,relax_max_difference=_a ,additional_params_copy_to_batched_inputs=_a ,)

    def lowerCAmelCase_ ( self ) -> Optional[int]:
        snake_case__ :Optional[int] = [
            """decoder_num_inference_steps""",
            """super_res_num_inference_steps""",
        ]
        if torch_device == "mps":
            # TODO: MPS errors with larger batch sizes
            snake_case__ :List[Any] = [2, 3]
            self._test_inference_batch_consistent(
                batch_sizes=_a ,additional_params_copy_to_batched_inputs=_a ,)
        else:
            self._test_inference_batch_consistent(
                additional_params_copy_to_batched_inputs=_a )

    @skip_mps
    def lowerCAmelCase_ ( self ) -> int:
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def lowerCAmelCase_ ( self ) -> Optional[Any]:
        return super().test_save_load_local()

    @skip_mps
    def lowerCAmelCase_ ( self ) -> Optional[int]:
        return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
    """Slow GPU integration test for the Karlo UnCLIP image-variation checkpoint.

    NOTE(review): references to ``_a`` do not resolve here (mechanical renaming
    damage); code is left byte-identical, only comments were added.
    """

    def lowerCAmelCase_ ( self ) -> Tuple:
        # NOTE(review): presumably ``tearDown`` before renaming — under the
        # current name unittest will never call it automatically.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowerCAmelCase_ ( self ) -> str:
        snake_case__ :Tuple = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png" )
        snake_case__ :str = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/unclip/karlo_v1_alpha_cat_variation_fp16.npy" )
        snake_case__ :List[Any] = UnCLIPImageVariationPipeline.from_pretrained(
            "kakaobrain/karlo-v1-alpha-image-variations" ,torch_dtype=torch.floataa )
        snake_case__ :int = pipeline.to(_a )
        pipeline.set_progress_bar_config(disable=_a )
        snake_case__ :List[Any] = torch.Generator(device="cpu" ).manual_seed(0 )
        snake_case__ :Any = pipeline(
            _a ,generator=_a ,output_type="np" ,)
        snake_case__ :Any = output.images[0]
        assert image.shape == (256, 256, 3)
        assert_mean_pixel_difference(_a ,_a ,15 )
| 712
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
# NOTE(review): both assignments rebind the same mangled name — only the second
# survives.  Presumably two distinct batch-size constants (16 and 32) before
# renaming — TODO confirm against the upstream accelerate example.
__UpperCAmelCase : Optional[Any] = 1_6
__UpperCAmelCase : Optional[int] = 3_2
def lowercase_ ( accelerator , batch_size: int = 16 , model_name: str = "bert-base-cased" ):
    """Build train/validation dataloaders for GLUE MRPC.

    Fixes: the original signature declared three parameters all named
    ``__snake_case`` (a SyntaxError) and the body referenced unbound names
    (``tokenizer``, ``datasets``, ``accelerator``).  Local names are restored;
    argument values destroyed by renaming (``truncation``, ``shuffle``, ...)
    are reconstructed from the standard accelerate example — TODO confirm.

    Args:
        accelerator: the ``Accelerator`` whose distributed type decides padding.
        batch_size: per-device batch size for both dataloaders.
        model_name: tokenizer checkpoint to load.

    Returns:
        ``(train_dataloader, eval_dataloader)``.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    datasets = load_dataset("glue" , "mrpc" )

    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=True , max_length=None )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label" , "labels" )

    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding="max_length" , max_length=1_28 , return_tensors="pt" )
        return tokenizer.pad(examples , padding="longest" , return_tensors="pt" )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
def lowercase_ ( accelerator , model , eval_dataloader , metric ):
    """Run one evaluation pass and return the computed accuracy.

    Fixes: the original declared four parameters all named ``__snake_case``
    (a SyntaxError) and never bound the locals the body referenced
    (``samples_seen``, ``predictions``, ``references``, ``eval_metric``).
    Parameter order follows the upstream accelerate checkpointing example —
    TODO confirm.
    """
    model.eval()
    samples_seen = 0  # tracks examples already counted so distributed padding duplicates are dropped
    for step, batch in enumerate(eval_dataloader ):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device )
        with torch.no_grad():
            outputs = model(**batch )
        predictions = outputs.logits.argmax(dim=-1 )
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"]) )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader ) - 1:
                # Trim the padded duplicates that distributed sampling adds to the final batch.
                predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                references = references[: len(eval_dataloader.dataset ) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions , references=references , )
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def lowercase_ ( config , args ):
    """Train on GLUE MRPC with checkpoint save/resume support.

    Fixes: the original declared both parameters as ``__snake_case`` (a
    SyntaxError) and every local was the rebound name ``snake_case__``, leaving
    the body's references unbound.  Local names are restored from the upstream
    accelerate checkpointing example — TODO confirm.

    NOTE(review): ``get_dataloaders`` / ``evaluation_loop`` are the functions
    defined above — they were renamed to ``lowercase_`` by the same damage and
    should be restored to their original names for this call to resolve.
    """
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"] )
    seed = int(config["seed"] )
    batch_size = int(config["batch_size"] )
    model_name_or_path = args.model_name_or_path
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size , model_name_or_path )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path , return_dict=True )
    # Instantiate optimizer: DeepSpeed supplies its own optimizer when one is configured.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() , lr=lr )
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler (a dummy one when DeepSpeed config provides its own)
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue" , "mrpc" )
    ending_epoch = num_epochs
    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch
    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint )
        # Parse the epoch number out of a checkpoint folder named "epoch_<N>...".
        epoch_string = args.resume_from_checkpoint.split("epoch_" )[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num ) + 1
        accuracy = evaluation_loop(accelerator , model , eval_dataloader , metric )
        accelerator.print("resumed checkpoint performance:" , accuracy )
        accelerator.print("resumed checkpoint's scheduler's lr:" , lr_scheduler.get_lr()[0] )
        accelerator.print("resumed optimizers's lr:" , optimizer.param_groups[0]["lr"] )
        with open(os.path.join(args.output_dir , F'state_{starting_epoch-1}.json' ) , "r" ) as f:
            resumed_state = json.load(f )
        # Sanity-check that the restored state matches what was saved.
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return
    # Now we train the model
    state = {}
    for epoch in range(starting_epoch , ending_epoch ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        # Save a checkpoint folder per epoch and record the state alongside it.
        output_dir = F'epoch_{epoch}'
        output_dir = os.path.join(args.output_dir , output_dir )
        accelerator.save_state(output_dir )
        accuracy = evaluation_loop(accelerator , model , eval_dataloader , metric )
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["step"] = overall_step
        accelerator.print(F'epoch {epoch}:' , state )
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir , F'state_{epoch}.json' ) , "w" ) as f:
                json.dump(state , f )
def lowercase_ ( ):
    """Parse CLI arguments and launch training.

    Fixes: the original passed the undefined module-level name ``__snake_case``
    as ``type=``/``default=``/``required=`` for every argument (NameError at
    runtime); restored to ``str``/``int``/``None``/``False`` per the upstream
    accelerate example — TODO confirm defaults.

    NOTE(review): the parser description mentions peak GPU memory usage, which
    does not obviously match this script; kept verbatim (runtime-visible text).
    """
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
    parser.add_argument(
        "--model_name_or_path" , type=str , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=False , )
    parser.add_argument(
        "--output_dir" , type=str , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
    parser.add_argument(
        "--resume_from_checkpoint" , type=str , default=None , help="If the training should continue from a checkpoint folder." , )
    parser.add_argument(
        "--partial_train_epoch" , type=int , default=None , help="If passed, the training will stop after this number of epochs." , )
    parser.add_argument(
        "--num_epochs" , type=int , default=2 , help="Number of train epochs." , )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    # NOTE(review): ``training_function`` is the function defined above — it was
    # renamed to ``lowercase_`` by the same damage and needs its name restored.
    training_function(config , args )
if __name__ == "__main__":
    # NOTE(review): no ``main`` is defined in this module — every function was
    # renamed to ``lowercase_`` — so this call raises NameError at runtime.
    main()
| 57
| 0
|
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def lowercase_ ( module ) -> None:
    """Disable gradient tracking for every parameter of ``module`` (freeze it)."""
    for param in module.parameters():
        # Fix: the original assigned False to a throwaway local instead of
        # ``param.requires_grad``, so nothing was actually frozen.
        param.requires_grad = False
def lowercase_ ( ) -> str:
    """Pick the best available torch device string: "cuda", "mps", or "cpu".

    Fix: the original bound the result to a throwaway local and then read the
    unbound name ``device`` (NameError); the return annotation was also wrong.
    """
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations." )
    return device
def lowercase_ ( img ) -> None:
    """Display ``img`` with matplotlib, hiding both axes.

    Fix: the original passed the undefined module-level name
    ``__UpperCAmelCase`` to ``plt.imshow`` and ``set_visible`` (NameError).
    The ``set_visible`` argument was destroyed by renaming; ``False`` (hide
    the axes) is the presumed original — TODO confirm against upstream.
    """
    fig = plt.imshow(img )
    fig.axes.get_xaxis().set_visible(False )
    fig.axes.get_yaxis().set_visible(False )
    plt.show()
def lowercase_ ( ) -> str:
    """Return the current wall-clock time formatted as ``HH:MM:SS``.

    Fix: the original bound ``datetime.now()`` to a throwaway local and then
    read the unbound name ``current_time`` (NameError).
    """
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S" )
    return timestamp
| 713
|
from __future__ import annotations
class _snake_case :
def __init__( self ,UpperCamelCase ) -> None:
snake_case__ :Union[str, Any] = data
snake_case__ :Node | None = None
snake_case__ :Node | None = None
def lowercase_ ( tree: Node | None ) -> None:
    """Print every node's ``data`` with an in-order (left, root, right) traversal.

    Fix: the body read the unbound name ``tree`` (the parameter had been
    renamed to ``__snake_case``) and recursed through the nonexistent name
    ``display``; recursion now goes through this function's actual name.
    """
    if tree:
        lowercase_(tree.left )
        print(tree.data )
        lowercase_(tree.right )
def lowercase_ ( tree: Node | None ) -> int:
    """Return the number of nodes on the longest root-to-leaf path (0 for an empty tree).

    Fix: the body read the unbound ``tree`` and recursed through the
    nonexistent name ``depth_of_tree``; recursion now uses this function's
    actual name.
    """
    return 1 + max(lowercase_(tree.left ) , lowercase_(tree.right ) ) if tree else 0
def lowercase_ ( tree: Node ) -> bool:
    """Return True if every node in ``tree`` has either zero or two children.

    An empty tree counts as full.  Fix: the body read the unbound ``tree`` and
    recursed through the nonexistent name ``is_full_binary_tree``; recursion
    now uses this function's actual name.
    """
    if not tree:
        return True
    if tree.left and tree.right:
        return lowercase_(tree.left ) and lowercase_(tree.right )
    else:
        return not tree.left and not tree.right
def lowercase_ ( ) -> None:  # Main function for testing.
    '''simple docstring'''
    # NOTE(review): this block is damaged by mechanical renaming — ``Node`` is
    # undefined at runtime (the class above is ``_snake_case``), every
    # assignment rebinds the same local ``snake_case__`` so no tree is ever
    # wired together, and ``is_full_binary_tree`` / ``depth_of_tree`` /
    # ``display`` do not exist under those names (nor does the argument
    # ``__snake_case``).  Left byte-identical; only comments added.
    snake_case__ :Dict = Node(1 )
    snake_case__ :int = Node(2 )
    snake_case__ :Optional[Any] = Node(3 )
    snake_case__ :Tuple = Node(4 )
    snake_case__ :str = Node(5 )
    snake_case__ :Optional[Any] = Node(6 )
    snake_case__ :List[Any] = Node(7 )
    snake_case__ :List[str] = Node(8 )
    snake_case__ :Tuple = Node(9 )
    print(is_full_binary_tree(__snake_case ) )
    print(depth_of_tree(__snake_case ) )
    print("Tree is: " )
    display(__snake_case )
if __name__ == "__main__":
    # NOTE(review): ``main`` is undefined — the entry point above is ``lowercase_``.
    main()
| 57
| 0
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()  # make runs deterministic so the pixel-slice assertions below are reproducible
class _snake_case ( unittest.TestCase ):
    """Fast tests for ``AltDiffusionImgaImgPipeline`` built from tiny dummy components.

    NOTE(review): references to ``_UpperCAmelCase`` do not resolve and every
    method carries the mangled name ``lowerCAmelCase_`` (later defs shadow
    earlier ones) — mechanical renaming damage.  Code is left byte-identical;
    only comments were added.
    """

    def lowerCAmelCase_ ( self ) -> Optional[int]:
        # NOTE(review): presumably ``tearDown`` before renaming — unittest will
        # not call it automatically under this name.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def lowerCAmelCase_ ( self ) -> Dict:
        # A deterministic 1x3x32x32 dummy image tensor.
        snake_case__ :Optional[int] = 1
        snake_case__ :Any = 3
        snake_case__ :Union[str, Any] = (32, 32)
        snake_case__ :str = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(_UpperCAmelCase )
        return image

    @property
    def lowerCAmelCase_ ( self ) -> Dict:
        torch.manual_seed(0 )
        snake_case__ :Optional[Any] = UNetaDConditionModel(
            block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") ,up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") ,cross_attention_dim=32 ,)
        return model

    @property
    def lowerCAmelCase_ ( self ) -> str:
        torch.manual_seed(0 )
        snake_case__ :Any = AutoencoderKL(
            block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] ,up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] ,latent_channels=4 ,)
        return model

    @property
    def lowerCAmelCase_ ( self ) -> Tuple:
        torch.manual_seed(0 )
        snake_case__ :List[str] = RobertaSeriesConfig(
            hidden_size=32 ,project_dim=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=5_006 ,)
        return RobertaSeriesModelWithTransformation(_UpperCAmelCase )

    @property
    def lowerCAmelCase_ ( self ) -> str:
        # A stand-in feature extractor returning an empty pixel-values holder.
        def extract(*UpperCamelCase ,**UpperCamelCase ):
            # NOTE(review): ``*`` and ``**`` parameters share a name — a
            # SyntaxError as written; presumably (*args, **kwargs).
            class _snake_case :
                def __init__( self ) -> str:
                    snake_case__ :Union[str, Any] = torch.ones([0] )

                def lowerCAmelCase_ ( self ,UpperCamelCase ) -> List[Any]:
                    self.pixel_values.to(_UpperCAmelCase )
                    return self
            return Out()
        return extract

    def lowerCAmelCase_ ( self ) -> List[Any]:
        snake_case__ :Union[str, Any] = "cpu"  # ensure determinism for the device-dependent torch.Generator
        snake_case__ :List[str] = self.dummy_cond_unet
        snake_case__ :List[Any] = PNDMScheduler(skip_prk_steps=_UpperCAmelCase )
        snake_case__ :Optional[int] = self.dummy_vae
        snake_case__ :Any = self.dummy_text_encoder
        snake_case__ :Optional[int] = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
        snake_case__ :Union[str, Any] = 77
        snake_case__ :int = self.dummy_image.to(_UpperCAmelCase )
        snake_case__ :str = init_image / 2 + 0.5
        # make sure here that pndm scheduler skips prk
        snake_case__ :List[Any] = AltDiffusionImgaImgPipeline(
            unet=_UpperCAmelCase ,scheduler=_UpperCAmelCase ,vae=_UpperCAmelCase ,text_encoder=_UpperCAmelCase ,tokenizer=_UpperCAmelCase ,safety_checker=_UpperCAmelCase ,feature_extractor=self.dummy_extractor ,)
        snake_case__ :List[Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor ,do_normalize=_UpperCAmelCase )
        snake_case__ :int = alt_pipe.to(_UpperCAmelCase )
        alt_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
        snake_case__ :List[Any] = "A painting of a squirrel eating a burger"
        snake_case__ :int = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 )
        snake_case__ :Union[str, Any] = alt_pipe(
            [prompt] ,generator=_UpperCAmelCase ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="np" ,image=_UpperCAmelCase ,)
        snake_case__ :Optional[int] = output.images
        snake_case__ :List[str] = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 )
        snake_case__ :Optional[Any] = alt_pipe(
            [prompt] ,generator=_UpperCAmelCase ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="np" ,image=_UpperCAmelCase ,return_dict=_UpperCAmelCase ,)[0]
        snake_case__ :Dict = image[0, -3:, -3:, -1]
        snake_case__ :List[Any] = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        snake_case__ :int = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3

    @unittest.skipIf(torch_device != "cuda" ,"This test requires a GPU" )
    def lowerCAmelCase_ ( self ) -> str:
        snake_case__ :List[str] = self.dummy_cond_unet
        snake_case__ :Tuple = PNDMScheduler(skip_prk_steps=_UpperCAmelCase )
        snake_case__ :List[str] = self.dummy_vae
        snake_case__ :List[Any] = self.dummy_text_encoder
        snake_case__ :int = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
        snake_case__ :List[Any] = 77
        snake_case__ :Optional[int] = self.dummy_image.to(_UpperCAmelCase )
        # put models in fp16
        snake_case__ :Dict = unet.half()
        snake_case__ :List[Any] = vae.half()
        snake_case__ :List[Any] = bert.half()
        # make sure here that pndm scheduler skips prk
        snake_case__ :Optional[int] = AltDiffusionImgaImgPipeline(
            unet=_UpperCAmelCase ,scheduler=_UpperCAmelCase ,vae=_UpperCAmelCase ,text_encoder=_UpperCAmelCase ,tokenizer=_UpperCAmelCase ,safety_checker=_UpperCAmelCase ,feature_extractor=self.dummy_extractor ,)
        snake_case__ :Dict = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor ,do_normalize=_UpperCAmelCase )
        snake_case__ :Dict = alt_pipe.to(_UpperCAmelCase )
        alt_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
        snake_case__ :Tuple = "A painting of a squirrel eating a burger"
        snake_case__ :Dict = torch.manual_seed(0 )
        snake_case__ :List[Any] = alt_pipe(
            [prompt] ,generator=_UpperCAmelCase ,num_inference_steps=2 ,output_type="np" ,image=_UpperCAmelCase ,).images
        assert image.shape == (1, 32, 32, 3)

    @unittest.skipIf(torch_device != "cuda" ,"This test requires a GPU" )
    def lowerCAmelCase_ ( self ) -> List[str]:
        snake_case__ :Optional[int] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        # resize to resolution that is divisible by 8 but not 16 or 32
        snake_case__ :Tuple = init_image.resize((760, 504) )
        snake_case__ :Dict = "BAAI/AltDiffusion"
        snake_case__ :str = AltDiffusionImgaImgPipeline.from_pretrained(
            _UpperCAmelCase ,safety_checker=_UpperCAmelCase ,)
        pipe.to(_UpperCAmelCase )
        pipe.set_progress_bar_config(disable=_UpperCAmelCase )
        pipe.enable_attention_slicing()
        snake_case__ :List[str] = "A fantasy landscape, trending on artstation"
        snake_case__ :List[str] = torch.manual_seed(0 )
        snake_case__ :Optional[int] = pipe(
            prompt=_UpperCAmelCase ,image=_UpperCAmelCase ,strength=0.75 ,guidance_scale=7.5 ,generator=_UpperCAmelCase ,output_type="np" ,)
        snake_case__ :str = output.images[0]
        snake_case__ :Dict = image[255:258, 383:386, -1]
        assert image.shape == (504, 760, 3)
        snake_case__ :Tuple = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class AltDiffusionImgaImgPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests running the real BAAI/AltDiffusion img2img pipeline.

    Defects fixed: both methods shared one obfuscated name (the first was shadowed
    and `tearDown` was never invoked by unittest), and locals were assigned to a
    placeholder name while read under their real names.
    """

    def tearDown(self):
        # release GPU memory between tests
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImgaImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
| 714
|
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

# Sanity-check repository file names: no uppercase, spaces, hyphens, or
# top-level files outside a directory. Exits non-zero if any are found.
# Defect fixed: every variable was assigned to one placeholder name while
# read under its real name (`filepaths`, `upper_files`, ...).
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
| 57
| 0
|
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
__UpperCAmelCase : List[Any] = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    """Tests FAISS / Elasticsearch indexing on a `datasets.Dataset`.

    Defects fixed: undefined base class placeholder (restored to TestCase), all
    methods shared one obfuscated name (shadowing; `self._create_dummy_dataset()`
    was unresolvable), tuple unpacks collapsed to single placeholder targets, and
    `np.floataa` restored to `np.float32`.
    """

    def _create_dummy_dataset(self) -> Dataset:
        # 30 rows named my_name-train_0 .. my_name-train_29
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        # row i gets the vector i * ones(5), so the query ones(5) is nearest to row 29
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")

    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_serialization(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_drop_index(self):
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        # querying a dropped/unknown index must raise MissingIndex
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))

    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset: Dataset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()
            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")
@require_faiss
class FaissIndexTest(TestCase):
    """Unit tests for the low-level FaissIndex wrapper.

    Defects fixed: duplicated obfuscated method names, `query[1] = 1` collapsed to
    a bare assignment, exception-class placeholders restored to ValueError, and
    `np.floataa` restored to `np.float32`.
    """

    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query: one-hot at position 1 has max inner product with basis vector 1
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries: reversed identity maps query k to vector 4-k
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        # passing both a factory string and a custom index is ambiguous
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
@require_faiss
def test_serialization_fs(mockfs):
    """Round-trip a FaissIndex through an fsspec filesystem.

    The parameter must be named `mockfs` — it is a pytest fixture referenced by
    name in the body (the obfuscated placeholder parameter left it undefined).
    """
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    """Tests ElasticSearchIndex against fully mocked elasticsearch client calls.

    Defects fixed: undefined base-class placeholder, and the
    `mocked_index_create.return_value` / `mocked_search.return_value` assignment
    targets were collapsed to a placeholder local name.
    """

    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
| 715
|
def apply_table(inp: str, table: list[int]) -> str:
    """Permute bit-string `inp` according to 1-based positions in `table`.

    Defect fixed: the obfuscated signature had two parameters with the same
    placeholder name (a SyntaxError) while the body read `inp`/`table`; the
    __main__ block already calls `apply_table` by this name.
    """
    res = ""
    for i in table:
        res += inp[i - 1]  # table entries are 1-based
    return res
def left_shift(data: str) -> str:
    """Circular left shift of a bit string by one position."""
    return data[1:] + data[0]
def xor(a: str, b: str) -> str:
    """Bitwise XOR of two equal-length bit strings, returned as a bit string."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res
def apply_sbox(s: list[list[int]], data: str) -> str:
    """Apply an S-DES S-box: outer bits select the row, inner bits the column."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]  # note: result may be 1 bit; caller left-pads
def function(expansion: list[int], s0: list[list[int]], s1: list[list[int]], key: str, message: str) -> str:
    """One Feistel round of S-DES.

    Expands the right half, XORs with the round key, runs both S-boxes,
    P4-permutes, and XORs the result into the left half.
    Uses the script-level `p4_table` (defined in the __main__ block).
    """
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    # left-pad each S-box output to exactly two bits
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right
if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    # fixed S-DES permutation tables and S-boxes
    # (restored names: the obfuscation collapsed p8/p10/p4 to one identifier)
    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation: P10, split, shift, P8 -> key1; two more shifts, P8 -> key2
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption: IP, round(key1), swap halves, round(key2), IP^-1
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption: same structure with the round keys in reverse order
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decypting is:", PT)
| 57
| 0
|
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas.

    Restored name: the builder class below references `PandasConfig`; the
    obfuscated field `_A = None` is restored to the `features` dataclass field.
    """

    # optional schema to cast the loaded table to
    features: Optional[datasets.Features] = None
class Pandas(datasets.ArrowBasedBuilder):
    """Arrow-based builder that loads pickled pandas DataFrames.

    Defects fixed: the four builder hook names were collapsed to one obfuscated
    identifier (shadowing each other), and parameter placeholders (`_A`) are
    restored to real names.
    """

    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table):
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
                yield i, self._cast_table(pa_table)
| 716
|
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """T5-style encoder over note tokens with learned (frozen) absolute positions.

    Defects fixed: the three base-class placeholders are restored to the mixins
    imported above, and the ten identically-named placeholder parameters
    (a SyntaxError) are restored to real, @register_to_config-compatible names.
    """

    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ) -> None:
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        # positions are a fixed lookup table — keep them out of the optimizer
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = TaConfig(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = TaBlock(t5config)
            self.encoders.append(lyr)

        self.layer_norm = TaLayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        """Encode token ids; returns (hidden states, the unchanged input mask)."""
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
| 57
| 0
|
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find a root of `function` (a sympy expression string) by Newton-Raphson.

    Defect fixed: all five parameters and every local had been collapsed to one
    placeholder name; the __main__ demo below calls `newton_raphson`.

    Raises:
        ZeroDivisionError: if the derivative vanishes at the current guess.
    """
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(prev_guess)
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")

    # Find root of polynomial
    # Find fourth Root of 5
    print(f"The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 + 5j)}")

    # Find value of e
    print(
        "The root of log(y) - 1 = 0 is ",
        f"{newton_raphson('log(y) - 1', 2, variable='y')}",
    )

    # Exponential Roots
    print(
        "The root of exp(x) - 1 = 0 is",
        f"{newton_raphson('exp(x) - 1', 10, precision=0.005)}",
    )

    # Find root of cos(x)
    print(f"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}")
| 717
|
# Directed acyclic graph and its vertex list (restored names: the function body
# reads `edges` and `vertices`, but the assignments used placeholder names).
edges: dict[str, list[str]] = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices: list[str] = ["a", "b", "c", "d", "e"]


def topological_sort(start: str, visited: list[str], sort: list[str]) -> list[str]:
    """Depth-first topological sort of the module-level `edges` graph.

    `visited` and `sort` are accumulator lists mutated in place across the
    recursion; the final ordering is returned.
    """
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
| 57
| 0
|
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SwinvaModelTester:
    """Builds small SwinvaConfig/model instances and checks output shapes.

    Defects fixed: all method names were collapsed to one obfuscated identifier
    (shadowing), the parameter names were placeholders, and the
    config/pixel_values/labels tuple unpack was collapsed to a single target.
    The class name is restored because the test case below instantiates
    `SwinvaModelTester(self)`.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        # NOTE(review): `path_norm=` reproduces the kwarg spelling found in the
        # original obfuscated call — confirm against SwinvaConfig's signature.
        return SwinvaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            path_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwinvaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = SwinvaForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = SwinvaForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = SwinvaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class _snake_case ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
_A = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
_A = (
{'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification}
if is_torch_available()
else {}
)
_A = False
_A = False
_A = False
_A = False
def setUp(self):
    # Restored unittest hook name (the obfuscated name was never invoked).
    # model_tester drives shape checks; config_tester drives config round-trips.
    self.model_tester = SwinvaModelTester(self)
    self.config_tester = ConfigTester(self, config_class=SwinvaConfig, embed_dim=37)
def test_config(self):
    """Run the standard ConfigTester battery against SwinvaConfig."""
    self.config_tester.create_and_test_config_to_json_string()
    self.config_tester.create_and_test_config_to_json_file()
    self.config_tester.create_and_test_config_from_and_save_pretrained()
    self.config_tester.create_and_test_config_with_num_labels()
    self.config_tester.check_config_can_be_init_without_params()
    self.config_tester.check_config_arguments_init()
def test_model(self):
    # base forward-pass shape check via the model tester
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.")
def test_multi_gpu_data_parallel_forward(self):
    # Name restored from the skip reason's upstream counterpart — TODO confirm.
    pass
@unittest.skip(reason="Swinv2 does not use inputs_embeds")
def test_inputs_embeds(self):
    # vision model: takes pixel_values, never inputs_embeds
    pass
def test_model_common_attributes(self):
    """Input embeddings must be an nn.Module; output embeddings Linear or None."""
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

    for model_class in self.all_model_classes:
        model = model_class(config)
        self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
        x = model.get_output_embeddings()
        self.assertTrue(x is None or isinstance(x, nn.Linear))
def test_forward_signature(self):
    """Every model's forward() must take `pixel_values` as its first argument."""
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

    for model_class in self.all_model_classes:
        model = model_class(config)
        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]

        expected_arg_names = ["pixel_values"]
        self.assertListEqual(arg_names[:1], expected_arg_names)
def test_attention_outputs(self):
    """Attentions must be returned via kwarg or config, with window-sized shapes."""
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    config.return_dict = True

    for model_class in self.all_model_classes:
        inputs_dict["output_attentions"] = True
        inputs_dict["output_hidden_states"] = False
        config.return_dict = True
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
        attentions = outputs.attentions
        # one attention tensor per stage
        expected_num_attentions = len(self.model_tester.depths)
        self.assertEqual(len(attentions), expected_num_attentions)

        # check that output_attentions also work using config
        del inputs_dict["output_attentions"]
        config.output_attentions = True
        window_size_squared = config.window_size**2
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
        attentions = outputs.attentions
        self.assertEqual(len(attentions), expected_num_attentions)
        self.assertListEqual(
            list(attentions[0].shape[-3:]),
            [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
        )
        out_len = len(outputs)

        # Check attention is always last and order is fine
        inputs_dict["output_attentions"] = True
        inputs_dict["output_hidden_states"] = True
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        if hasattr(self.model_tester, "num_hidden_states_types"):
            added_hidden_states = self.model_tester.num_hidden_states_types
        else:
            # also another +1 for reshaped_hidden_states
            added_hidden_states = 2
        self.assertEqual(out_len + added_hidden_states, len(outputs))

        self_attentions = outputs.attentions
        self.assertEqual(len(self_attentions), expected_num_attentions)
        self.assertListEqual(
            list(self_attentions[0].shape[-3:]),
            [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
        )
    def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Optional[Any]:
        # Shape-checks `hidden_states` and `reshaped_hidden_states` for one model class.
        # NOTE(review): identifiers such as `lowerCamelCase_`, `model_class`, `config`,
        # `outputs` and `image_size` are not defined in this view (names look
        # machine-mangled) — confirm against the original Swinv2 test suite.
        snake_case__ :Union[str, Any] = model_class(lowerCamelCase_ )
        model.to(lowerCamelCase_ )
        model.eval()
        with torch.no_grad():
            snake_case__ :Optional[int] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
        snake_case__ :Dict = outputs.hidden_states
        snake_case__ :str = getattr(
            self.model_tester ,"expected_num_hidden_layers" ,len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
        # Swinv2 has a different seq_length
        snake_case__ :List[str] = (
            config.patch_size
            if isinstance(config.patch_size ,collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        # Sequence length = number of patches (image area divided into patches).
        snake_case__ :Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
        snake_case__ :Any = outputs.reshaped_hidden_states
        self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
        snake_case__ :Any = reshaped_hidden_states[0].shape
        # Flatten the spatial dims back to a sequence so shapes can be compared.
        snake_case__ :Optional[int] = (
            reshaped_hidden_states[0].view(lowerCamelCase_ ,lowerCamelCase_ ,height * width ).permute(0 ,2 ,1 )
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
    def lowerCAmelCase_ ( self ) -> Optional[int]:
        # Runs the hidden-states check for every model class, first via the
        # `output_hidden_states` kwarg, then via the config flag.
        # NOTE(review): `lowerCamelCase_` is undefined in this view — presumably
        # config / inputs_dict / model_class / image_size were intended; confirm.
        snake_case__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case__ :List[str] = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            snake_case__ :Tuple = True
            self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            snake_case__ :List[str] = True
            self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
    def lowerCAmelCase_ ( self ) -> Optional[int]:
        # Same hidden-states check, but with the input resolution padded up to a
        # multiple of the patch size.
        snake_case__ :str = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case__ :List[Any] = 3
        snake_case__ :int = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        snake_case__ :Optional[Any] = (
            config.patch_size
            if isinstance(config.patch_size ,collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        # Pads by `patch - (size % patch)`; note this adds a full extra patch
        # when the size is already an exact multiple.
        snake_case__ :Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        snake_case__ :Optional[int] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            snake_case__ :Optional[Any] = True
            self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            snake_case__ :Optional[Any] = True
            self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) )
    def lowerCAmelCase_ ( self ) -> List[Any]:
        # Exercises the masked-image-modeling head via the shared model tester.
        # NOTE(review): `lowerCamelCase_` is undefined here — presumably the
        # config_and_inputs tuple bound on the previous line; confirm.
        snake_case__ :Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )
    def lowerCAmelCase_ ( self ) -> Optional[Any]:
        # Exercises the image-classification head via the shared model tester.
        # NOTE(review): `lowerCamelCase_` is undefined here — presumably the
        # config_and_inputs tuple bound on the previous line; confirm.
        snake_case__ :Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
    @slow
    def lowerCAmelCase_ ( self ) -> List[str]:
        # Smoke-test: the first pretrained Swinv2 checkpoint loads successfully.
        # NOTE(review): `lowerCamelCase_` is undefined — presumably `model_name`
        # and `model` were intended; confirm.
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            snake_case__ :Tuple = SwinvaModel.from_pretrained(lowerCamelCase_ )
            self.assertIsNotNone(lowerCamelCase_ )
    def lowerCAmelCase_ ( self ) -> Any:
        # Verifies parameters are properly initialized when all init scales are
        # zeroed out via `_config_zero_init`: each trainable non-embedding,
        # non-logit_scale parameter must mean to exactly 0.0 or 1.0.
        snake_case__ :List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case__ :Tuple = _config_zero_init(lowerCamelCase_ )
        for model_class in self.all_model_classes:
            snake_case__ :Dict = model_class(config=lowerCamelCase_ )
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    # Rounding to 9 decimals tolerates float noise around 0/1.
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item() ,[0.0, 1.0] ,msg=f'Parameter {name} of model {model_class} seems not properly initialized' ,)
@require_vision
@require_torch
class _snake_case ( unittest.TestCase ):
    # Integration test: run the pretrained Swinv2 tiny classifier on one COCO
    # fixture image and compare a slice of the logits against reference values.
    # NOTE(review): `lowerCamelCase_` is undefined throughout (mangled names) —
    # presumably the device / inputs / expected tensor; confirm upstream.
    @cached_property
    def lowerCAmelCase_ ( self ) -> List[Any]:
        # Image processor for the checkpoint, or None when vision deps are absent.
        return (
            AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256" )
            if is_vision_available()
            else None
        )
    @slow
    def lowerCAmelCase_ ( self ) -> Tuple:
        snake_case__ :Optional[int] = SwinvaForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256" ).to(
            lowerCamelCase_ )
        snake_case__ :Dict = self.default_image_processor
        snake_case__ :Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        snake_case__ :List[str] = image_processor(images=lowerCamelCase_ ,return_tensors="pt" ).to(lowerCamelCase_ )
        # forward pass
        with torch.no_grad():
            snake_case__ :Tuple = model(**lowerCamelCase_ )
        # verify the logits
        snake_case__ :List[str] = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape ,lowerCamelCase_ )
        snake_case__ :int = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(lowerCamelCase_ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowerCamelCase_ ,atol=1E-4 ) )
| 718
|
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _snake_case ( unittest.TestCase ):
    # Integration tests for the Flax StableDiffusion + ControlNet pipeline:
    # generate sharded images from a conditioning image (canny edges / openpose)
    # and compare a fixed pixel slice against reference values.
    # NOTE(review): `UpperCamelCase` is referenced as a value in method bodies but
    # is never bound (mangled names); `jnp.bfloataa` likewise looks like a
    # mangled `jnp.bfloat16` — confirm against the original diffusers tests.
    def lowerCAmelCase_ ( self ) -> int:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def lowerCAmelCase_ ( self ) -> str:
        # Canny-edge conditioning: prompt "bird" + edge map of a bird photo.
        snake_case__ , snake_case__ :Tuple = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny" ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa )
        snake_case__ , snake_case__ :Any = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5" ,controlnet=UpperCamelCase ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa )
        snake_case__ :List[str] = controlnet_params
        snake_case__ :Union[str, Any] = "bird"
        snake_case__ :Optional[int] = jax.device_count()
        snake_case__ :Tuple = pipe.prepare_text_inputs([prompts] * num_samples )
        snake_case__ :Union[str, Any] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" )
        snake_case__ :str = pipe.prepare_image_inputs([canny_image] * num_samples )
        snake_case__ :List[str] = jax.random.PRNGKey(0 )
        snake_case__ :str = jax.random.split(UpperCamelCase ,jax.device_count() )
        # Replicate params and shard the per-device inputs for pmapped inference.
        snake_case__ :int = replicate(UpperCamelCase )
        snake_case__ :Any = shard(UpperCamelCase )
        snake_case__ :Any = shard(UpperCamelCase )
        snake_case__ :str = pipe(
            prompt_ids=UpperCamelCase ,image=UpperCamelCase ,params=UpperCamelCase ,prng_seed=UpperCamelCase ,num_inference_steps=50 ,jit=UpperCamelCase ,).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        snake_case__ :List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        snake_case__ :Any = images[0, 253:256, 253:256, -1]
        snake_case__ :Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        snake_case__ :List[Any] = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078] )
        print(f'output_slice: {output_slice}' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
    def lowerCAmelCase_ ( self ) -> Optional[int]:
        # OpenPose conditioning: prompt "Chef in the kitchen" + pose image.
        snake_case__ , snake_case__ :List[str] = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose" ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa )
        snake_case__ , snake_case__ :Optional[Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5" ,controlnet=UpperCamelCase ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa )
        snake_case__ :str = controlnet_params
        snake_case__ :int = "Chef in the kitchen"
        snake_case__ :List[Any] = jax.device_count()
        snake_case__ :Dict = pipe.prepare_text_inputs([prompts] * num_samples )
        snake_case__ :Any = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" )
        snake_case__ :Optional[int] = pipe.prepare_image_inputs([pose_image] * num_samples )
        snake_case__ :List[str] = jax.random.PRNGKey(0 )
        snake_case__ :Any = jax.random.split(UpperCamelCase ,jax.device_count() )
        snake_case__ :Dict = replicate(UpperCamelCase )
        snake_case__ :Tuple = shard(UpperCamelCase )
        snake_case__ :Optional[int] = shard(UpperCamelCase )
        snake_case__ :Optional[Any] = pipe(
            prompt_ids=UpperCamelCase ,image=UpperCamelCase ,params=UpperCamelCase ,prng_seed=UpperCamelCase ,num_inference_steps=50 ,jit=UpperCamelCase ,).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        snake_case__ :int = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        snake_case__ :List[str] = images[0, 253:256, 253:256, -1]
        snake_case__ :Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        snake_case__ :List[str] = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]] )
        print(f'output_slice: {output_slice}' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 57
| 0
|
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
# Load the iris dataset and split it for the k-NN demo below.
# NOTE(review): every statement rebinds the single mangled name `__UpperCAmelCase`,
# and `data`, `X`, `y` are never bound — these reads will raise NameError as
# written. The reads suggest the intended names were data / X / y / classes /
# (X_train, X_test, y_train, y_test); confirm before running.
__UpperCAmelCase : List[str] = datasets.load_iris()
__UpperCAmelCase : int = np.array(data["data"])
__UpperCAmelCase : Optional[int] = np.array(data["target"])
__UpperCAmelCase : Optional[int] = data['''target_names''']
__UpperCAmelCase : Optional[Any] = train_test_split(X, y)
def lowercase_(point_a, point_b) -> float:
    """Return the Euclidean distance between two points given as sequences.

    Bug fix: the original declared both parameters with the same name (a
    SyntaxError) and its body referenced an undefined ``_A``.

    Args:
        point_a: first point, any sequence of numbers.
        point_b: second point, same length as ``point_a``.

    Returns:
        The L2 distance as a float.
    """
    return float(np.linalg.norm(np.array(point_a) - np.array(point_b)))
def lowercase_(train_data, train_target, classes, point, k=5):
    """Classify ``point`` with a k-nearest-neighbours majority vote.

    Bug fix: the original declared five parameters all named ``__snake_case``
    (a SyntaxError) and referenced the undefined name ``_A``; the distance
    helper it called (``euclidean_distance``) is not defined in this module,
    so the computation is inlined here.

    Args:
        train_data: iterable of training points (numeric sequences).
        train_target: class index for each training point.
        classes: list mapping class index -> class name.
        point: the point to classify.
        k: number of neighbours to vote (default 5).

    Returns:
        The name (from ``classes``) of the majority class among the k
        nearest training points.
    """
    # Distances of all training points from the point to be classified.
    distances = []
    for sample, label in zip(train_data, train_target):
        dist = float(np.linalg.norm(np.array(sample) - np.array(point)))
        distances.append((dist, label))
    # Labels of the k points with the smallest distances.
    votes = [label for _, label in sorted(distances)[:k]]
    # Most commonly occurring class among them wins.
    winner = Counter(votes).most_common(1)[0][0]
    return classes[winner]
if __name__ == "__main__":
    # Demo: classify one iris sample.
    # NOTE(review): `classifier`, `X_train`, `y_train` and `classes` are not
    # bound under these names in this module (definitions above are mangled) —
    # this raises NameError as written; restore the names before running.
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 719
|
def lowercase_(sequence: list) -> list:
    """Sort a list of non-negative integers in place using bead sort.

    Bug fix: the original's validation line read the undefined name
    ``sequence`` (the parameter was mangled to ``__snake_case``) and called
    ``isinstance(x, <the list itself>)`` instead of ``isinstance(x, int)``.

    Args:
        sequence: list of non-negative integers; mutated in place.

    Returns:
        The same list, sorted ascending.

    Raises:
        TypeError: if any element is not a non-negative integer.
    """
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    # Repeatedly let "beads" fall: move excess from each rod to its neighbour.
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
    # Self-check when run as a script.
    # NOTE(review): `bead_sort` is not defined in this module — the sorter above
    # is bound to `lowercase_`, so these asserts raise NameError as written.
    assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 57
| 0
|
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class _snake_case ( unittest.TestCase ):
    # Model-tester helper for the Flax BEiT tests: holds hyperparameters and
    # builds configs/inputs for the checks below.
    # NOTE(review): parameter lists declare many arguments all named
    # `UpperCamelCase` (a SyntaxError as written) and bodies read names such as
    # `parent`, `vocab_size`, `snake_case_`, `config_and_inputs` that are never
    # bound — the file appears machine-mangled; confirm against the original
    # transformers test suite before running.
    def __init__( self ,UpperCamelCase ,UpperCamelCase=100 ,UpperCamelCase=13 ,UpperCamelCase=30 ,UpperCamelCase=2 ,UpperCamelCase=3 ,UpperCamelCase=True ,UpperCamelCase=True ,UpperCamelCase=32 ,UpperCamelCase=5 ,UpperCamelCase=4 ,UpperCamelCase=37 ,UpperCamelCase="gelu" ,UpperCamelCase=0.1 ,UpperCamelCase=0.1 ,UpperCamelCase=10 ,UpperCamelCase=0.02 ,UpperCamelCase=3 ,) -> Optional[int]:
        snake_case__ :Tuple = parent
        snake_case__ :Any = vocab_size
        snake_case__ :List[str] = batch_size
        snake_case__ :int = image_size
        snake_case__ :Any = patch_size
        snake_case__ :List[str] = num_channels
        snake_case__ :List[Any] = is_training
        snake_case__ :Optional[Any] = use_labels
        snake_case__ :str = hidden_size
        snake_case__ :int = num_hidden_layers
        snake_case__ :Optional[Any] = num_attention_heads
        snake_case__ :Optional[int] = intermediate_size
        snake_case__ :Any = hidden_act
        snake_case__ :Any = hidden_dropout_prob
        snake_case__ :List[Any] = attention_probs_dropout_prob
        snake_case__ :int = type_sequence_label_size
        snake_case__ :Optional[Any] = initializer_range
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        snake_case__ :Dict = (image_size // patch_size) ** 2
        snake_case__ :Tuple = num_patches + 1
    def lowerCAmelCase_ ( self ) -> str:
        # Builds (config, pixel_values, labels) for one forward pass.
        snake_case__ :Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        snake_case__ :Dict = None
        if self.use_labels:
            snake_case__ :Any = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
        snake_case__ :Optional[int] = BeitConfig(
            vocab_size=self.vocab_size ,image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=snake_case_ ,initializer_range=self.initializer_range ,)
        return config, pixel_values, labels
    def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> List[Any]:
        # Base model forward: last_hidden_state must be (batch, seq, hidden).
        snake_case__ :int = FlaxBeitModel(config=snake_case_ )
        snake_case__ :List[Any] = model(snake_case_ )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
    def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> List[Any]:
        # Masked-image-modeling head: logits are (batch, seq-1, vocab).
        snake_case__ :Tuple = FlaxBeitForMaskedImageModeling(config=snake_case_ )
        snake_case__ :Tuple = model(snake_case_ )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length - 1, self.vocab_size) )
    def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> List[str]:
        # Classification head: logits are (batch, num_labels); also smoke-tests
        # single-channel (greyscale) input.
        snake_case__ :Dict = self.type_sequence_label_size
        snake_case__ :List[str] = FlaxBeitForImageClassification(config=snake_case_ )
        snake_case__ :Tuple = model(snake_case_ )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        snake_case__ :Tuple = 1
        snake_case__ :List[str] = FlaxBeitForImageClassification(snake_case_ )
        snake_case__ :Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        snake_case__ :List[str] = model(snake_case_ )
    def lowerCAmelCase_ ( self ) -> Dict:
        # Repackages prepare_config_and_inputs() as (config, inputs_dict).
        snake_case__ :int = self.prepare_config_and_inputs()
        (
            (
                snake_case__
            ) , (
                snake_case__
            ) , (
                snake_case__
            ) ,
        ) :Tuple = config_and_inputs
        snake_case__ :int = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class _snake_case ( _a , unittest.TestCase ):
    # Common-test harness for the Flax BEiT model classes: config tests,
    # forward-signature check, JIT-vs-eager equivalence, and head checks.
    # NOTE(review): `snake_case_`, `FlaxBeitModelTester` and several other names
    # read here are never bound in this module (machine-mangled) — confirm
    # against the original transformers test suite before running.
    _A = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )
    def lowerCAmelCase_ ( self ) -> None:
        snake_case__ :List[Any] = FlaxBeitModelTester(self )
        snake_case__ :List[str] = ConfigTester(self ,config_class=snake_case_ ,has_text_modality=snake_case_ ,hidden_size=37 )
    def lowerCAmelCase_ ( self ) -> Dict:
        # Delegates to the shared config test-suite.
        self.config_tester.run_common_tests()
    def lowerCAmelCase_ ( self ) -> Optional[int]:
        # The first positional argument of every model's __call__ must be
        # `pixel_values`.
        snake_case__ , snake_case__ :str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case__ :Union[str, Any] = model_class(snake_case_ )
            snake_case__ :Dict = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            snake_case__ :int = [*signature.parameters.keys()]
            snake_case__ :List[Any] = ["pixel_values"]
            self.assertListEqual(arg_names[:1] ,snake_case_ )
    def lowerCAmelCase_ ( self ) -> Dict:
        # Jitted and non-jitted forward passes must produce identically-shaped
        # outputs.
        snake_case__ , snake_case__ :str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                snake_case__ :Optional[Any] = self._prepare_for_class(snake_case_ ,snake_case_ )
                snake_case__ :Tuple = model_class(snake_case_ )
                @jax.jit
                def model_jitted(UpperCamelCase ,**UpperCamelCase ):
                    return model(pixel_values=snake_case_ ,**snake_case_ )
                with self.subTest("JIT Enabled" ):
                    snake_case__ :Tuple = model_jitted(**snake_case_ ).to_tuple()
                with self.subTest("JIT Disabled" ):
                    with jax.disable_jit():
                        snake_case__ :Any = model_jitted(**snake_case_ ).to_tuple()
                self.assertEqual(len(snake_case_ ) ,len(snake_case_ ) )
                for jitted_output, output in zip(snake_case_ ,snake_case_ ):
                    self.assertEqual(jitted_output.shape ,output.shape )
    def lowerCAmelCase_ ( self ) -> Dict:
        snake_case__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*snake_case_ )
    def lowerCAmelCase_ ( self ) -> Any:
        snake_case__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*snake_case_ )
    def lowerCAmelCase_ ( self ) -> Optional[int]:
        snake_case__ :Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*snake_case_ )
    @slow
    def lowerCAmelCase_ ( self ) -> List[Any]:
        # Smoke-test: each class loads the pretrained checkpoint and runs a
        # forward pass on a dummy all-ones image.
        for model_class_name in self.all_model_classes:
            snake_case__ :str = model_class_name.from_pretrained("microsoft/beit-base-patch16-224" )
            snake_case__ :List[str] = model(np.ones((1, 3, 224, 224) ) )
            self.assertIsNotNone(snake_case_ )
def lowercase_():
    """Load the COCO cats fixture image used by the integration tests below.

    Bug fix: the original assigned the opened image to a mangled name but
    returned the undefined name ``image``, raising NameError.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@require_flax
class _snake_case ( unittest.TestCase ):
    # Integration tests for pretrained Flax BEiT checkpoints on the COCO
    # fixture image: masked-image-modeling logits, ImageNet-1k and
    # ImageNet-22k classification logits are compared to reference slices.
    # NOTE(review): `snake_case_` and `prepare_img` are referenced but never
    # bound in this module (machine-mangled names) — confirm against the
    # original transformers test suite before running.
    @cached_property
    def lowerCAmelCase_ ( self ) -> List[Any]:
        # Image processor for the checkpoint, or None when vision deps are absent.
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
    @slow
    def lowerCAmelCase_ ( self ) -> str:
        snake_case__ :int = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" )
        snake_case__ :Any = self.default_image_processor
        snake_case__ :List[Any] = prepare_img()
        snake_case__ :int = image_processor(images=snake_case_ ,return_tensors="np" ).pixel_values
        # prepare bool_masked_pos
        snake_case__ :str = np.ones((1, 196) ,dtype=snake_case_ )
        # forward pass
        snake_case__ :Optional[Any] = model(pixel_values=snake_case_ ,bool_masked_pos=snake_case_ )
        snake_case__ :Optional[int] = outputs.logits
        # verify the logits
        snake_case__ :str = (1, 196, 8_192)
        self.assertEqual(logits.shape ,snake_case_ )
        snake_case__ :Any = np.array(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] )
        self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] ,snake_case_ ,atol=1E-2 ) )
    @slow
    def lowerCAmelCase_ ( self ) -> Optional[int]:
        snake_case__ :Dict = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" )
        snake_case__ :List[str] = self.default_image_processor
        snake_case__ :Any = prepare_img()
        snake_case__ :Dict = image_processor(images=snake_case_ ,return_tensors="np" )
        # forward pass
        snake_case__ :Dict = model(**snake_case_ )
        snake_case__ :Optional[Any] = outputs.logits
        # verify the logits
        snake_case__ :Dict = (1, 1_000)
        self.assertEqual(logits.shape ,snake_case_ )
        snake_case__ :List[str] = np.array([-1.2385, -1.0987, -1.0108] )
        self.assertTrue(np.allclose(logits[0, :3] ,snake_case_ ,atol=1E-4 ) )
        # Expected top-1 class index for the fixture (tabby cat).
        snake_case__ :Optional[int] = 281
        self.assertEqual(logits.argmax(-1 ).item() ,snake_case_ )
    @slow
    def lowerCAmelCase_ ( self ) -> int:
        snake_case__ :Union[str, Any] = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" )
        snake_case__ :int = self.default_image_processor
        snake_case__ :Any = prepare_img()
        snake_case__ :Union[str, Any] = image_processor(images=snake_case_ ,return_tensors="np" )
        # forward pass
        snake_case__ :Union[str, Any] = model(**snake_case_ )
        snake_case__ :Optional[Any] = outputs.logits
        # verify the logits
        snake_case__ :int = (1, 21_841)
        self.assertEqual(logits.shape ,snake_case_ )
        snake_case__ :str = np.array([1.6881, -0.2787, 0.5901] )
        self.assertTrue(np.allclose(logits[0, :3] ,snake_case_ ,atol=1E-4 ) )
        snake_case__ :str = 2_396
        self.assertEqual(logits.argmax(-1 ).item() ,snake_case_ )
| 720
|
from __future__ import annotations
def lowercase_(nums: list) -> float:
    """Return the arithmetic mean of a non-empty list of numbers.

    Bug fix: the original's body read the undefined name ``nums`` while the
    parameter was mangled to ``__snake_case``; the parameter is renamed to
    match the body (callers pass it positionally).

    Args:
        nums: non-empty list of numbers.

    Returns:
        The mean as a float.

    Raises:
        ValueError: if ``nums`` is empty.
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 57
| 0
|
def lowercase_(n: int, k: int) -> int:
    """Compute the binomial coefficient C(n, k) with an iterative product.

    Bug fix: the original declared both parameters as ``__snake_case`` (a
    SyntaxError) and looped over the undefined name ``__UpperCamelCase``.

    Args:
        n: number of items, n >= 0.
        k: number chosen, 0 <= k <= n.

    Returns:
        C(n, k) as an exact integer.
    """
    result = 1  # accumulates the product
    # Since C(n, k) == C(n, n - k), use the smaller k to shorten the loop.
    if k > (n - k):
        k = n - k
    # Multiply n * (n-1) * ... and divide by 1, 2, ... as we go; each
    # intermediate division is exact.
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result
def lowercase_(node_count: int) -> int:
    """Return the ``node_count``-th Catalan number: C(2n, n) // (n + 1).

    Bug fix: the original referenced the undefined name ``__UpperCamelCase``;
    the parameter is renamed to match its use (callers pass it positionally).

    NOTE(review): ``binomial_coefficient`` is not bound under that name in
    this module (the helper above is mangled to ``lowercase_``) — resolve the
    naming before running.
    """
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)
def lowercase_(n: int) -> int:
    """Return n! computed iteratively.

    Bug fix: the original's body read ``n`` while the parameter was mangled
    to ``__snake_case``; the parameter is renamed to match the body.

    Args:
        n: non-negative integer.

    Returns:
        n! as an exact integer (0! == 1).

    Raises:
        ValueError: if ``n`` is negative.
    """
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result
def lowercase_(node_count: int) -> int:
    """Number of binary trees on ``node_count`` labelled nodes:
    catalan(n) * n!.

    Bug fix: the original referenced the undefined name ``__UpperCamelCase``;
    the parameter is renamed to match its use (callers pass it positionally).

    NOTE(review): ``catalan_number`` and ``factorial`` are not bound under
    those names in this module (the helpers above are mangled to
    ``lowercase_``) — resolve the naming before running.
    """
    return catalan_number(node_count) * factorial(node_count)
if __name__ == "__main__":
    # Interactive demo: read a node count and report tree counts.
    # NOTE(review): the input is bound to the mangled name `__UpperCAmelCase`,
    # yet `node_count`, `binary_tree_count` and `catalan_number` are read below
    # and are never bound — NameError as written; restore the names first.
    __UpperCAmelCase : Optional[Any] = int(input("Enter the number of nodes: ").strip() or 0)
    if node_count <= 0:
        raise ValueError("We need some nodes to work with.")
    print(
        F'''Given {node_count} nodes, there are {binary_tree_count(node_count)} '''
        F'''binary trees and {catalan_number(node_count)} binary search trees.'''
    )
| 721
|
from __future__ import annotations
import math
def lowercase_(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Minimax over a complete binary tree whose leaves hold ``scores``.

    Bug fix: the original declared all five parameters as ``__snake_case``
    (a SyntaxError), read the undefined names ``depth``/``height``, and
    recursed via the unbound name ``minimax``; reconstructed with the
    standard alternating max/min recursion and self-recursion.

    Args:
        depth: current depth in the tree (root == 0).
        node_index: index of the current node within its level.
        is_max: True when the current player maximises.
        scores: leaf values, length a power of two.
        height: tree height, i.e. log2(len(scores)).

    Returns:
        The minimax value reachable from this node.

    Raises:
        ValueError: if ``depth`` is negative or ``scores`` is empty.
    """
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        # Leaf level: return the raw score.
        return scores[node_index]
    if is_max:
        return max(
            lowercase_(depth + 1, node_index * 2, False, scores, height),
            lowercase_(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        lowercase_(depth + 1, node_index * 2, True, scores, height),
        lowercase_(depth + 1, node_index * 2 + 1, True, scores, height),
    )
def lowercase_() -> None:
    """Demo driver: print the minimax value of a fixed score sheet.

    Bug fix: the original read the undefined mangled name ``__snake_case``
    for the scores/height/is_max arguments.

    NOTE(review): ``minimax`` is not bound under that name in this module
    (the function above is mangled to ``lowercase_``) — resolve the naming
    before running.
    """
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    # Height of the complete binary tree whose leaves are the scores.
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): `main` is not defined in this module — the driver above is
    # bound to the mangled name `lowercase_`; this call raises NameError as
    # written.
    main()
| 57
| 0
|
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Module logger and the map of checkpoint name -> hosted config URL.
# NOTE(review): both statements rebind the same mangled name `__UpperCAmelCase`,
# so the logger binding is lost and `logger.info(...)` below will fail —
# upstream these were presumably `logger` and an archive map; confirm.
__UpperCAmelCase : str = logging.get_logger(__name__)
__UpperCAmelCase : List[str] = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}
class _snake_case ( _lowercase ):
    # Configuration class for Conditional DETR (model + Hungarian-matcher and
    # loss-coefficient hyperparameters).
    # NOTE(review): the three `_A` assignments below rebind one mangled class
    # attribute, so only the dict survives; the `__init__` declares every
    # parameter as `UpperCamelCase` (a SyntaxError as written) while the body
    # reads `use_timm_backbone`, `backbone_config`, `num_channels`, ... —
    # confirm against the original transformers config before running.
    _A = '''conditional_detr'''
    _A = ['''past_key_values''']
    _A = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
    }
    def __init__( self ,UpperCamelCase=True ,UpperCamelCase=None ,UpperCamelCase=3 ,UpperCamelCase=300 ,UpperCamelCase=6 ,UpperCamelCase=2_048 ,UpperCamelCase=8 ,UpperCamelCase=6 ,UpperCamelCase=2_048 ,UpperCamelCase=8 ,UpperCamelCase=0.0 ,UpperCamelCase=0.0 ,UpperCamelCase=True ,UpperCamelCase="relu" ,UpperCamelCase=256 ,UpperCamelCase=0.1 ,UpperCamelCase=0.0 ,UpperCamelCase=0.0 ,UpperCamelCase=0.02 ,UpperCamelCase=1.0 ,UpperCamelCase=False ,UpperCamelCase="sine" ,UpperCamelCase="resnet50" ,UpperCamelCase=True ,UpperCamelCase=False ,UpperCamelCase=2 ,UpperCamelCase=5 ,UpperCamelCase=2 ,UpperCamelCase=1 ,UpperCamelCase=1 ,UpperCamelCase=2 ,UpperCamelCase=5 ,UpperCamelCase=2 ,UpperCamelCase=0.25 ,**UpperCamelCase ,) -> Optional[Any]:
        # timm backbone and explicit backbone config are mutually exclusive.
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can\'t specify both `backbone_config` and `use_timm_backbone`." )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
                snake_case__ :List[Any] = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
            elif isinstance(A_ ,A_ ):
                # Rebuild a config object from a plain dict.
                snake_case__ :Tuple = backbone_config.get("model_type" )
                snake_case__ :Dict = CONFIG_MAPPING[backbone_model_type]
                snake_case__ :Tuple = config_class.from_dict(A_ )
        snake_case__ :Dict = use_timm_backbone
        snake_case__ :Dict = backbone_config
        snake_case__ :Optional[int] = num_channels
        snake_case__ :Union[str, Any] = num_queries
        snake_case__ :List[str] = d_model
        snake_case__ :str = encoder_ffn_dim
        snake_case__ :Optional[int] = encoder_layers
        snake_case__ :Any = encoder_attention_heads
        snake_case__ :Tuple = decoder_ffn_dim
        snake_case__ :Any = decoder_layers
        snake_case__ :List[str] = decoder_attention_heads
        snake_case__ :List[Any] = dropout
        snake_case__ :int = attention_dropout
        snake_case__ :List[Any] = activation_dropout
        snake_case__ :Dict = activation_function
        snake_case__ :Optional[int] = init_std
        snake_case__ :int = init_xavier_std
        snake_case__ :List[Any] = encoder_layerdrop
        snake_case__ :Tuple = decoder_layerdrop
        snake_case__ :Union[str, Any] = encoder_layers
        snake_case__ :List[str] = auxiliary_loss
        snake_case__ :Optional[Any] = position_embedding_type
        snake_case__ :Tuple = backbone
        snake_case__ :Optional[int] = use_pretrained_backbone
        snake_case__ :Optional[Any] = dilation
        # Hungarian matcher
        snake_case__ :List[Any] = class_cost
        snake_case__ :Tuple = bbox_cost
        snake_case__ :Any = giou_cost
        # Loss coefficients
        snake_case__ :Optional[int] = mask_loss_coefficient
        snake_case__ :Any = dice_loss_coefficient
        snake_case__ :Optional[int] = cls_loss_coefficient
        snake_case__ :List[Any] = bbox_loss_coefficient
        snake_case__ :Dict = giou_loss_coefficient
        snake_case__ :Dict = focal_alpha
        super().__init__(is_encoder_decoder=A_ ,**A_ )
    # NOTE(review): the two properties below share one mangled name, so only
    # the second binding survives on the class.
    @property
    def lowerCAmelCase_ ( self ) -> int:
        # Number of encoder attention heads.
        return self.encoder_attention_heads
    @property
    def lowerCAmelCase_ ( self ) -> int:
        # Model (hidden) dimension.
        return self.d_model
    def lowerCAmelCase_ ( self ) -> Optional[Any]:
        # Serialize the config to a plain dict (likely upstream `to_dict` —
        # confirm): deep-copy __dict__, expand the nested backbone config, and
        # record the model type.
        snake_case__ :Optional[int] = copy.deepcopy(self.__dict__ )
        if self.backbone_config is not None:
            snake_case__ :str = self.backbone_config.to_dict()
        snake_case__ :str = self.__class__.model_type
        return output
class _snake_case ( _lowercase ):
    # ONNX export configuration for Conditional DETR.
    # NOTE(review): the three properties below all share the mangled name
    # `lowerCAmelCase_`, so only the last binding survives on the class —
    # upstream these were presumably `inputs`, `atol_for_validation` and
    # `default_onnx_opset`; confirm.
    _A = version.parse('1.11' )
    @property
    def lowerCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
        # ONNX input spec: names mapped to {axis index: dynamic-axis name}.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ] )
    @property
    def lowerCAmelCase_ ( self ) -> float:
        # Absolute tolerance used when validating exported outputs.
        return 1E-5
    @property
    def lowerCAmelCase_ ( self ) -> int:
        # ONNX opset version.
        return 12
| 700
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
# Module-level logger (bound to a mangled name; nothing below reads it here).
__UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
def lowercase_(a, b):
    """Pairwise squared Euclidean distances between rows of ``a`` and ``b``.

    Bug fix: the original declared both parameters as ``__snake_case`` (a
    SyntaxError) and its body read the undefined names ``b``/``aa``/``ab``/
    ``ba``.

    Args:
        a: array of shape (n, d).
        b: array of shape (m, d).

    Returns:
        (n, m) array whose entry (i, j) is ||a[i] - b[j]||^2.
    """
    bt = np.asarray(b).T
    # ||p - q||^2 = ||p||^2 - 2 p.q + ||q||^2, expanded with broadcasting.
    aa = np.sum(np.square(a), axis=1)
    bb = np.sum(np.square(bt), axis=0)
    ab = np.matmul(a, bt)
    return aa[:, None] - 2 * ab + bb[None, :]
def lowercase_(x, clusters):
    """Map each RGB pixel of ``x`` to the index of its nearest cluster color.

    Bug fix: the original declared both parameters as ``__snake_case`` (a
    SyntaxError), read the undefined name ``x``, and called
    ``squared_euclidean_distance`` which is not bound in this module — the
    distance computation is inlined here.

    Args:
        x: array whose last dimension is 3 (RGB); any leading shape.
        clusters: (k, 3) array of cluster colors.

    Returns:
        1-D int array of length ``x.size // 3`` with nearest-cluster indices.
    """
    pixels = np.asarray(x).reshape(-1, 3)
    ct = np.asarray(clusters).T
    # Squared distance of every pixel to every cluster, via
    # ||p - c||^2 = ||p||^2 - 2 p.c + ||c||^2 with broadcasting.
    pp = np.sum(np.square(pixels), axis=1)
    cc = np.sum(np.square(ct), axis=0)
    d = pp[:, None] - 2 * np.matmul(pixels, ct) + cc[None, :]
    return np.argmin(d, axis=1)
class _snake_case ( _A ):
_A = ['pixel_values']
    def __init__( self ,UpperCamelCase = None ,UpperCamelCase = True ,UpperCamelCase = None ,UpperCamelCase = PILImageResampling.BILINEAR ,UpperCamelCase = True ,UpperCamelCase = True ,**UpperCamelCase ,) -> None:
        # Stores resize/normalize/color-quantize settings for the processor.
        # NOTE(review): all parameters share the mangled name `UpperCamelCase`
        # (a SyntaxError as written); the body reads clusters, do_resize, size,
        # resample, do_normalize and do_color_quantize, suggesting those were
        # the intended parameter names — confirm before use.
        super().__init__(**UpperCamelCase )
        # Default target size is 256x256 when none is supplied.
        snake_case__ :List[Any] = size if size is not None else {"height": 256, "width": 256}
        snake_case__ :str = get_size_dict(UpperCamelCase )
        snake_case__ :Dict = np.array(UpperCamelCase ) if clusters is not None else None
        snake_case__ :str = do_resize
        snake_case__ :List[str] = size
        snake_case__ :List[Any] = resample
        snake_case__ :Union[str, Any] = do_normalize
        snake_case__ :int = do_color_quantize
    def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = PILImageResampling.BILINEAR ,UpperCamelCase = None ,**UpperCamelCase ,) -> np.ndarray:
        # Resize one image to the {"height", "width"} given in the size dict.
        # NOTE(review): the first two parameters share the mangled name
        # `UpperCamelCase` (a SyntaxError as written); the body reads `size`,
        # suggesting (image, size) were the intended names — confirm.
        snake_case__ :List[str] = get_size_dict(UpperCamelCase )
        if "height" not in size or "width" not in size:
            raise ValueError(f'Size dictionary must contain both height and width keys. Got {size.keys()}' )
        return resize(
            UpperCamelCase ,size=(size["height"], size["width"]) ,resample=UpperCamelCase ,data_format=UpperCamelCase ,**UpperCamelCase )
    def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ,) -> np.ndarray:
        # Normalize pixel values with x / 127.5 - 1, i.e. [0, 255] -> [-1, 1]
        # assuming 8-bit input — confirm the expected input range upstream.
        # NOTE(review): the body reads `image`, which is never bound here
        # (parameter names are mangled) — NameError as written.
        snake_case__ :Tuple = rescale(image=UpperCamelCase ,scale=1 / 127.5 ,data_format=UpperCamelCase )
        snake_case__ :List[Any] = image - 1
        return image
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = ChannelDimension.FIRST ,**UpperCamelCase ,) -> PIL.Image.Image:
snake_case__ :Optional[int] = do_resize if do_resize is not None else self.do_resize
snake_case__ :int = size if size is not None else self.size
snake_case__ :Tuple = get_size_dict(UpperCamelCase )
snake_case__ :str = resample if resample is not None else self.resample
snake_case__ :Dict = do_normalize if do_normalize is not None else self.do_normalize
snake_case__ :Tuple = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
snake_case__ :List[Any] = clusters if clusters is not None else self.clusters
snake_case__ :str = np.array(UpperCamelCase )
snake_case__ :int = make_list_of_images(UpperCamelCase )
if not valid_images(UpperCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_color_quantize and clusters is None:
raise ValueError("Clusters must be specified if do_color_quantize is True." )
# All transformations expect numpy arrays.
snake_case__ :Union[str, Any] = [to_numpy_array(UpperCamelCase ) for image in images]
if do_resize:
snake_case__ :int = [self.resize(image=UpperCamelCase ,size=UpperCamelCase ,resample=UpperCamelCase ) for image in images]
if do_normalize:
snake_case__ :Any = [self.normalize(image=UpperCamelCase ) for image in images]
if do_color_quantize:
snake_case__ :Optional[Any] = [to_channel_dimension_format(UpperCamelCase ,ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
snake_case__ :Union[str, Any] = np.array(UpperCamelCase )
snake_case__ :Optional[int] = color_quantize(UpperCamelCase ,UpperCamelCase ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
snake_case__ :List[Any] = images.shape[0]
snake_case__ :str = images.reshape(UpperCamelCase ,-1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
snake_case__ :Any = list(UpperCamelCase )
else:
snake_case__ :List[str] = [to_channel_dimension_format(UpperCamelCase ,UpperCamelCase ) for image in images]
snake_case__ :List[str] = {"input_ids": images}
return BatchFeature(data=UpperCamelCase ,tensor_type=UpperCamelCase )
# ---- segment boundary (dataset-concatenation artifact; original: "| 57 | 0 |") ----
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class _snake_case ( unittest.TestCase ):
    """Unit tests for the framework-agnostic tensor helpers
    (flatten_dict / transpose / reshape / squeeze / expand_dims).

    NOTE(review): in the original, every test method shared the name
    `lowerCAmelCase_` — later definitions shadowed earlier ones so unittest
    discovered none of them — and the bodies referenced the undefined name
    `UpperCamelCase`. Each test is restored with a unique `test_*` name and
    distinct local variables, keeping the assertions unchanged.
    """

    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }
        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))
        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))
        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))
        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))
        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))
        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))
        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))
        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))
        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))
        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))
        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))
        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))
        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
# ---- segment boundary (dataset-concatenation artifact; original: "| 701 |") ----
import pytest
# Name and source code of the dummy dataset loading script used by the
# fixtures below. BUG FIX: both constants were assigned to the same mangled
# name (`__UpperCAmelCase`), so the first value was lost and the fixtures'
# references to DATASET_LOADING_SCRIPT_NAME / DATASET_LOADING_SCRIPT_CODE
# were unresolvable.
DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"
DATASET_LOADING_SCRIPT_CODE = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n    def _info(self):\n        features = datasets.Features(\n            {\n                \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n                \"ner_tags\": datasets.Sequence(\n                    datasets.features.ClassLabel(\n                        names=[\n                            \"O\",\n                            \"B-PER\",\n                            \"I-PER\",\n                            \"B-ORG\",\n                            \"I-ORG\",\n                            \"B-LOC\",\n                            \"I-LOC\",\n                        ]\n                    )\n                ),\n                \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n                \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n            }\n        )\n        return datasets.DatasetInfo(features=features)\n\n    def _split_generators(self, dl_manager):\n        dl_path = dl_manager.download(URLS)\n        return [\n            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n        ]\n\n    def _generate_examples(self, filepath):\n        with open(filepath, \"r\", encoding=\"utf-8\") as f:\n            for i, line in enumerate(f):\n                yield i, json.loads(line)\n"
@pytest.fixture
def lowercase_ ( ) -> Optional[Any]:
    """Pytest fixture: the name of the dummy dataset loading script."""
    # NOTE(review): DATASET_LOADING_SCRIPT_NAME is not defined under that
    # name in this file (the module constant was mangled) — confirm it
    # resolves at module level.
    return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def lowercase_ ( ) -> Optional[int]:
    """Pytest fixture: the source code of the dummy dataset loading script."""
    # NOTE(review): DATASET_LOADING_SCRIPT_CODE is not defined under that
    # name in this file (the module constant was mangled) — confirm it
    # resolves at module level.
    return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def lowercase_(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    """Write the dummy dataset loading script under tmp_path and return its path.

    NOTE(review): the original declared three parameters all named
    `__snake_case` (a SyntaxError) while the body referenced the pytest
    fixture names; parameters restored so pytest can inject
    `dataset_loading_script_name`, `dataset_loading_script_code` and the
    built-in `tmp_path` fixture.
    """
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    # parents=True creates the intermediate "datasets" directory as well
    # (the original passed a mangled, undefined name here).
    script_dir.mkdir(parents=True)
    script_path = script_dir / f'{script_name}.py'
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_path)
# ---- segment boundary (dataset-concatenation artifact; original: "| 57 | 0 |") ----
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-module bootstrap for the Time Series Transformer model.
# BUG FIX (review): the original assigned the import structure to a throwaway
# mangled name while later referencing `_import_structure`, never registered
# the modeling symbols when torch was available, and discarded the _LazyModule
# instead of installing it in sys.modules. Restored to the standard pattern.
_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is present: expose the modeling classes as well.
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# ---- segment boundary (dataset-concatenation artifact; original: "| 702 |") ----
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
# ---- segment boundary (dataset-concatenation artifact; original: "| 57 | 0 |") ----
'''simple docstring'''
from string import ascii_uppercase
__UpperCAmelCase : List[Any] = {str(ord(c) - 5_5): c for c in ascii_uppercase}
def lowercase_(num: int, base: int) -> str:
    """Convert a non-negative integer to its string representation in `base`.

    Args:
        num: non-negative integer to convert.
        base: target base, 2 <= base <= 36; digits above 9 use "A"-"Z".

    Returns:
        The representation of `num` in `base`, e.g. (255, 16) -> "FF".

    Raises:
        TypeError: if `num` or `base` is a float or str.
        ValueError: if `num` is negative or `base` is outside [2, 36].

    NOTE(review): the original signature declared `__snake_case` twice (a
    SyntaxError) and the body referenced the undefined `snake_case_`;
    reconstructed from the evident intent and verified against the
    int()-round-trip check in the __main__ block.
    """
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")

    digits = ""
    while True:
        num, mod = divmod(num, base)
        # Digits 10-35 map to "A"-"Z" (chr(10 + 55) == "A"); mod < base <= 36,
        # so mod >= 10 can only occur for bases >= 11.
        digits = (chr(mod + 55) if mod >= 10 else str(mod)) + digits
        if num == 0:
            return digits
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 3_7):
for num in range(1_0_0_0):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
# ---- segment boundary (dataset-concatenation artifact; original: "| 703 |") ----
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
__UpperCAmelCase : Dict = True
except ImportError:
__UpperCAmelCase : List[Any] = False
__UpperCAmelCase : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
def lowercase_ ( __snake_case : Namespace ) -> Dict:
    """Factory wired into argparse via ``set_defaults(func=...)``: builds the
    add-new-model command object from the parsed CLI namespace (the intended
    parameter name is ``args``)."""
    # NOTE(review): mangled — the body references `args` (the parameter is
    # named `__snake_case`) and `AddNewModelCommand`, which is not defined
    # under that name in this file. Left untouched.
    return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class _snake_case ( _A ):
    """CLI command that scaffolds a new model from the cookiecutter template:
    runs cookiecutter, then moves/patches the generated configuration,
    modeling, test, tokenization and doc files into the repository layout.

    NOTE(review): this block is machine-mangled — the methods share the name
    `lowerCAmelCase_`, `__init__` repeats the parameter name `UpperCamelCase`
    (a SyntaxError), and bodies reference names (`parser`, `testing`, ...)
    that the mangled signatures no longer bind. Comments below describe the
    evident intent; the code itself is left untouched.
    """
    @staticmethod
    def lowerCAmelCase_ ( UpperCamelCase ) -> Any:
        # Register the `add-new-model` sub-command and its CLI flags on the
        # given parser (intended parameter name: `parser`).
        snake_case__ :Dict = parser.add_parser("add-new-model" )
        add_new_model_parser.add_argument("--testing" ,action="store_true" ,help="If in testing mode." )
        add_new_model_parser.add_argument("--testing_file" ,type=UpperCamelCase ,help="Configuration file on which to run." )
        add_new_model_parser.add_argument(
            "--path" ,type=UpperCamelCase ,help="Path to cookiecutter. Should only be used for testing purposes." )
        add_new_model_parser.set_defaults(func=UpperCamelCase )
    def __init__( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=None ,*UpperCamelCase ) -> Any:
        # Intended attributes (read later as self._testing / self._testing_file
        # / self._path); the mangled assignments bind locals instead.
        snake_case__ :Union[str, Any] = testing
        snake_case__ :Union[str, Any] = testing_file
        snake_case__ :List[str] = path
    def lowerCAmelCase_ ( self ) -> List[Any]:
        # run(): execute cookiecutter and install the generated files.
        warnings.warn(
            "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
            "It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
            "checks, you should use `transformers-cli add-new-model-like` instead." )
        if not _has_cookiecutter:
            raise ImportError(
                "Model creation dependencies are required to use the `add_new_model` command. Install them by running "
                "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n" )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        snake_case__ :Tuple = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(UpperCamelCase ) > 0:
            raise ValueError(
                "Several directories starting with `cookiecutter-template-` in current working directory. "
                "Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
                "change your working directory." )
        # Locate the transformers repo root and the template directory.
        snake_case__ :str = (
            Path(UpperCamelCase ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
        )
        snake_case__ :Tuple = path_to_transformer_root / "templates" / "adding_a_new_model"
        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(UpperCamelCase ) )
        else:
            # Testing mode: feed the answers from the configuration file
            # instead of prompting interactively.
            with open(self._testing_file ,"r" ) as configuration_file:
                snake_case__ :str = json.load(UpperCamelCase )
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path ) ,no_input=UpperCamelCase ,extra_context=UpperCamelCase ,)
        snake_case__ :List[Any] = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]
        # Retrieve configuration
        with open(directory + "/configuration.json" ,"r" ) as configuration_file:
            snake_case__ :Dict = json.load(UpperCamelCase )
        snake_case__ :Optional[Any] = configuration["lowercase_modelname"]
        snake_case__ :List[Any] = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f'{directory}/configuration.json' )
        # Which framework variants were requested.
        snake_case__ :Any = "PyTorch" in generate_tensorflow_pytorch_and_flax
        snake_case__ :Any = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        snake_case__ :Any = "Flax" in generate_tensorflow_pytorch_and_flax
        snake_case__ :Dict = f'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'
        os.makedirs(UpperCamelCase ,exist_ok=UpperCamelCase )
        os.makedirs(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}' ,exist_ok=UpperCamelCase )
        # Tests require submodules as they have parent imports
        with open(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' ,"w" ):
            pass
        shutil.move(
            f'{directory}/__init__.py' ,f'{model_dir}/__init__.py' ,)
        shutil.move(
            f'{directory}/configuration_{lowercase_model_name}.py' ,f'{model_dir}/configuration_{lowercase_model_name}.py' ,)
        def remove_copy_lines(UpperCamelCase ):
            # Strip "# Copied from transformers." markers from a generated file.
            with open(UpperCamelCase ,"r" ) as f:
                snake_case__ :List[str] = f.readlines()
            with open(UpperCamelCase ,"w" ) as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(UpperCamelCase )
        if output_pytorch:
            if not self._testing:
                remove_copy_lines(f'{directory}/modeling_{lowercase_model_name}.py' )
            shutil.move(
                f'{directory}/modeling_{lowercase_model_name}.py' ,f'{model_dir}/modeling_{lowercase_model_name}.py' ,)
            shutil.move(
                f'{directory}/test_modeling_{lowercase_model_name}.py' ,f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' ,)
        else:
            os.remove(f'{directory}/modeling_{lowercase_model_name}.py' )
            os.remove(f'{directory}/test_modeling_{lowercase_model_name}.py' )
        if output_tensorflow:
            if not self._testing:
                remove_copy_lines(f'{directory}/modeling_tf_{lowercase_model_name}.py' )
            shutil.move(
                f'{directory}/modeling_tf_{lowercase_model_name}.py' ,f'{model_dir}/modeling_tf_{lowercase_model_name}.py' ,)
            shutil.move(
                f'{directory}/test_modeling_tf_{lowercase_model_name}.py' ,f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' ,)
        else:
            os.remove(f'{directory}/modeling_tf_{lowercase_model_name}.py' )
            os.remove(f'{directory}/test_modeling_tf_{lowercase_model_name}.py' )
        if output_flax:
            if not self._testing:
                remove_copy_lines(f'{directory}/modeling_flax_{lowercase_model_name}.py' )
            shutil.move(
                f'{directory}/modeling_flax_{lowercase_model_name}.py' ,f'{model_dir}/modeling_flax_{lowercase_model_name}.py' ,)
            shutil.move(
                f'{directory}/test_modeling_flax_{lowercase_model_name}.py' ,f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' ,)
        else:
            os.remove(f'{directory}/modeling_flax_{lowercase_model_name}.py' )
            os.remove(f'{directory}/test_modeling_flax_{lowercase_model_name}.py' )
        shutil.move(
            f'{directory}/{lowercase_model_name}.md' ,f'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' ,)
        shutil.move(
            f'{directory}/tokenization_{lowercase_model_name}.py' ,f'{model_dir}/tokenization_{lowercase_model_name}.py' ,)
        shutil.move(
            f'{directory}/tokenization_fast_{lowercase_model_name}.py' ,f'{model_dir}/tokenization_{lowercase_model_name}_fast.py' ,)
        from os import fdopen, remove
        from shutil import copymode, move
        from tempfile import mkstemp
        def replace(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ):
            # Copy `lines_to_copy` into the file right below the marker line,
            # writing via a temp file and preserving permissions.
            # Create temp file
            snake_case__ , snake_case__ :Optional[Any] = mkstemp()
            snake_case__ :Optional[Any] = False
            with fdopen(UpperCamelCase ,"w" ) as new_file:
                with open(UpperCamelCase ) as old_file:
                    for line in old_file:
                        new_file.write(UpperCamelCase )
                        if line_to_copy_below in line:
                            snake_case__ :Optional[Any] = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(UpperCamelCase )
            if not line_found:
                raise ValueError(f'Line {line_to_copy_below} was not found in file.' )
            # Copy the file permissions from the old file to the new file
            copymode(UpperCamelCase ,UpperCamelCase )
            # Remove original file
            remove(UpperCamelCase )
            # Move new file
            move(UpperCamelCase ,UpperCamelCase )
        def skip_units(UpperCamelCase ):
            # A snippet is skipped when it targets a framework that was not
            # requested for generation.
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )
        def replace_in_files(UpperCamelCase ):
            # Parse a to_replace_* instruction file and apply each snippet to
            # its target file, then delete the instruction file.
            with open(UpperCamelCase ) as datafile:
                snake_case__ :int = []
                snake_case__ :Optional[int] = False
                snake_case__ :List[str] = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        snake_case__ :Optional[Any] = line.split("\"" )[1]
                        snake_case__ :Tuple = skip_units(UpperCamelCase )
                    elif "# Below: " in line and "##" not in line:
                        snake_case__ :Optional[Any] = line.split("\"" )[1]
                        snake_case__ :List[str] = skip_units(UpperCamelCase )
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
                        snake_case__ :Tuple = []
                    elif "# Replace with" in line and "##" not in line:
                        snake_case__ :Optional[Any] = []
                    elif "##" not in line:
                        lines_to_copy.append(UpperCamelCase )
            remove(UpperCamelCase )
        replace_in_files(f'{directory}/to_replace_{lowercase_model_name}.py' )
        os.rmdir(UpperCamelCase )
# ---- segment boundary (dataset-concatenation artifact; original: "| 57 | 0 |") ----
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowercase_(dataset, expected_features):
    """Shared assertions for a 4-row, 3-column JSON-loaded Dataset.

    NOTE(review): the original declared duplicate `__snake_case` parameters
    (a SyntaxError) and referenced the undefined `_lowerCamelCase`; names
    restored from the evident intent (`_check_json_dataset(dataset,
    expected_features)` in the callers).
    """
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def lowercase_ ( __snake_case : Dict , __snake_case : Optional[int] , __snake_case : str ) -> Tuple:
    """JsonDatasetReader should honor keep_in_memory (intended params:
    keep_in_memory, jsonl_path, tmp_path)."""
    # NOTE(review): mangled — duplicate `__snake_case` parameters are a
    # SyntaxError; `keep_in_memory`, `tmp_path`, `_lowerCamelCase` and
    # `_check_json_dataset` are unbound here. Code left untouched.
    snake_case__ :Tuple = tmp_path / "cache"
    snake_case__ :Any = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        snake_case__ :List[Any] = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase , keep_in_memory=_lowerCamelCase ).read()
    _check_json_dataset(_lowerCamelCase , _lowerCamelCase )
@pytest.mark.parametrize(
    "features" , [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ] , )
def lowercase_ ( __snake_case : Tuple , __snake_case : Union[str, Any] , __snake_case : Union[str, Any] ) -> Any:
    """JsonDatasetReader should cast columns to explicitly requested features
    (intended params: features, jsonl_path, tmp_path)."""
    # NOTE(review): mangled — duplicate `__snake_case` parameters are a
    # SyntaxError; `features`, `tmp_path`, `_lowerCamelCase` and
    # `_check_json_dataset` are unbound here. Code left untouched.
    snake_case__ :Dict = tmp_path / "cache"
    snake_case__ :List[str] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    snake_case__ :Optional[Any] = features.copy() if features else default_expected_features
    snake_case__ :int = (
        Features({feature: Value(_lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
    )
    snake_case__ :Any = JsonDatasetReader(_lowerCamelCase , features=_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
    _check_json_dataset(_lowerCamelCase , _lowerCamelCase )
@pytest.mark.parametrize(
    "features" , [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ] , )
def lowercase_ ( __snake_case : int , __snake_case : str , __snake_case : List[Any] ) -> List[str]:
    """Column order from an explicit (reordered) features spec should be
    preserved (intended params: features, jsonl_312_path, tmp_path)."""
    # NOTE(review): mangled — duplicate `__snake_case` parameters are a
    # SyntaxError; `features`, `tmp_path`, `_lowerCamelCase` and `dataset`
    # are unbound here. Code left untouched.
    snake_case__ :Union[str, Any] = tmp_path / "cache"
    snake_case__ :Dict = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    snake_case__ :Optional[Any] = features.copy() if features else default_expected_features
    snake_case__ :str = (
        Features({feature: Value(_lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
    )
    snake_case__ :Any = JsonDatasetReader(_lowerCamelCase , features=_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
    assert isinstance(_lowerCamelCase , _lowerCamelCase )
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
def lowercase_ ( __snake_case : Optional[Any] , __snake_case : str ) -> Any:
    """Loading with a features spec whose key order differs from the file
    should keep the spec's column order (intended params: jsonl_312_path,
    tmp_path)."""
    # NOTE(review): mangled — duplicate `__snake_case` parameters are a
    # SyntaxError; `tmp_path`, `_lowerCamelCase`, `dataset` and
    # `expected_features` are unbound here. Code left untouched.
    snake_case__ :List[str] = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    snake_case__ :int = features.copy()
    snake_case__ :Optional[Any] = (
        Features({feature: Value(_lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
    )
    snake_case__ :Union[str, Any] = tmp_path / "cache"
    snake_case__ :Dict = JsonDatasetReader(_lowerCamelCase , features=_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
    assert isinstance(_lowerCamelCase , _lowerCamelCase )
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def lowercase_ ( __snake_case : str , __snake_case : Optional[int] , __snake_case : Optional[int] ) -> Tuple:
    """JsonDatasetReader should tag the loaded dataset with the requested
    split, defaulting to "train" (intended params: split, jsonl_path,
    tmp_path)."""
    # NOTE(review): mangled — duplicate `__snake_case` parameters are a
    # SyntaxError; `tmp_path`, `_lowerCamelCase`, `_check_json_dataset`,
    # `dataset` and `split` are unbound here. Code left untouched.
    snake_case__ :Union[str, Any] = tmp_path / "cache"
    snake_case__ :Dict = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    snake_case__ :Optional[Any] = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase , split=_lowerCamelCase ).read()
    _check_json_dataset(_lowerCamelCase , _lowerCamelCase )
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def lowercase_ ( __snake_case : Any , __snake_case : Tuple , __snake_case : List[Any] ) -> Union[str, Any]:
    """JsonDatasetReader should accept a single path or a list of paths
    (intended params: path_type, jsonl_path, tmp_path)."""
    # NOTE(review): mangled — duplicate `__snake_case` parameters are a
    # SyntaxError; `jsonl_path`, `tmp_path`, `_lowerCamelCase` and
    # `_check_json_dataset` are unbound here. Code left untouched.
    if issubclass(_lowerCamelCase , _lowerCamelCase ):
        snake_case__ :List[Any] = jsonl_path
    elif issubclass(_lowerCamelCase , _lowerCamelCase ):
        snake_case__ :Optional[int] = [jsonl_path]
    snake_case__ :Tuple = tmp_path / "cache"
    snake_case__ :Tuple = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    snake_case__ :Union[str, Any] = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
    _check_json_dataset(_lowerCamelCase , _lowerCamelCase )
def lowercase_(dataset_dict, expected_features, splits=("train",)):
    """Shared assertions for a DatasetDict whose splits each hold the
    4-row, 3-column JSON fixture.

    NOTE(review): the original declared duplicate `__snake_case` parameters
    (a SyntaxError) and referenced the undefined `_lowerCamelCase`; names
    restored from the evident intent (`_check_json_datasetdict(...)` in the
    callers).
    """
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def lowercase_ ( __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : int ) -> Optional[int]:
    """JsonDatasetReader with a {split: path} mapping should honor
    keep_in_memory (intended params: keep_in_memory, jsonl_path, tmp_path)."""
    # NOTE(review): mangled — duplicate `__snake_case` parameters are a
    # SyntaxError; `jsonl_path`, `tmp_path`, `keep_in_memory`,
    # `_lowerCamelCase` and `_check_json_datasetdict` are unbound here.
    # Code left untouched.
    snake_case__ :List[Any] = tmp_path / "cache"
    snake_case__ :Optional[int] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        snake_case__ :Optional[int] = JsonDatasetReader({"train": jsonl_path} , cache_dir=_lowerCamelCase , keep_in_memory=_lowerCamelCase ).read()
    _check_json_datasetdict(_lowerCamelCase , _lowerCamelCase )
@pytest.mark.parametrize(
    "features" , [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ] , )
def lowercase_ ( __snake_case : str , __snake_case : Any , __snake_case : Tuple ) -> Tuple:
    """A {split: path} JsonDatasetReader should cast columns to explicitly
    requested features (intended params: features, jsonl_path, tmp_path)."""
    # NOTE(review): mangled — duplicate `__snake_case` parameters are a
    # SyntaxError; `jsonl_path`, `tmp_path`, `features`, `_lowerCamelCase`
    # and `_check_json_datasetdict` are unbound here. Code left untouched.
    snake_case__ :str = tmp_path / "cache"
    snake_case__ :List[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    snake_case__ :Any = features.copy() if features else default_expected_features
    snake_case__ :List[str] = (
        Features({feature: Value(_lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
    )
    snake_case__ :int = JsonDatasetReader({"train": jsonl_path} , features=_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
    _check_json_datasetdict(_lowerCamelCase , _lowerCamelCase )
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def lowercase_(split, jsonl_path, tmp_path) -> None:
    """Split names passed to the reader become the keys of the resulting DatasetDict."""
    if split:
        path = {split: jsonl_path}
    else:
        # No split requested: default to both train and test splits.
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def lowercase_(buffer):
    """Deserialize a single JSON document from a file-like object.

    Fixes the original body, which passed the undefined placeholder
    `_lowerCamelCase` to json.load instead of the argument.
    """
    return json.load(buffer)
def lowercase_(buffer):
    """Deserialize a JSON-lines stream: one JSON document per line.

    Fixes the original comprehension, which called json.loads on the undefined
    placeholder `_lowerCamelCase` instead of each line.
    """
    return [json.loads(line) for line in buffer]
class _snake_case:
    """Tests for JsonDatasetWriter: JSON-lines/orient round-trips, multiprocessing,
    argument validation, and on-the-fly compression.

    The original methods declared every parameter as `UpperCamelCase` (duplicate
    argument names — a SyntaxError); the real fixture/parameter names are restored.
    """

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def lowerCAmelCase_(self, lines, load_json_function, dataset):
        # Round-trip the dataset through an in-memory buffer and parse it back.
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def lowerCAmelCase_(self, orient, container, keys, len_at, dataset):
        # Each pandas-style `orient` yields a specific container shape and key set.
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def lowerCAmelCase_(self, lines, load_json_function, dataset):
        # Same JSON-lines round-trip, but written with two worker processes.
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def lowerCAmelCase_(self, orient, container, keys, len_at, dataset):
        # Orient round-trip with two worker processes.
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    def lowerCAmelCase_(self, dataset):
        # num_proc must be a positive integer; 0 is rejected at construction time.
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)

    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def lowerCAmelCase_(self, shared_datadir, dataset, tmp_path_factory, extension, compression):
        # Writing with compression must produce content identical (after
        # decompression) to the pre-built reference file in shared_datadir.
        path = tmp_path_factory.mktemp("data") / f"test.json.{extension}"
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()
        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
# ---- corpus artifact (file-boundary marker) ----
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
__UpperCAmelCase : str = logging.get_logger(__name__)
__UpperCAmelCase : List[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
__UpperCAmelCase : List[Any] = {
"vocab_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
},
"merges_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
},
}
__UpperCAmelCase : str = {"allegro/herbert-base-cased": 5_1_4}
__UpperCAmelCase : List[str] = {}
class _snake_case(PreTrainedTokenizerFast):
    """Fast (Rust-backed) tokenizer for HerBERT.

    Restores the original class: the base class and attribute names were mangled
    to `_A`, and duplicate `UpperCamelCase` parameters made the methods invalid.
    Sequence-pair handling also reused `token_ids_a` for both sequences; the
    second sequence is correctly `token_ids_1` here.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def lowerCAmelCase_(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs: `<s> A </s>` or `<s> A </s> B </s>` for pairs."""
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def lowerCAmelCase_(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def lowerCAmelCase_(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Token-type ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def lowerCAmelCase_(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """Save the underlying tokenizer model files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
# ---- corpus artifact (file-boundary marker) ----
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def lowercase_(image):
    """Convert a PIL image to a [-1, 1] float tensor of shape (1, 3, H, W),
    after snapping its size down to a multiple of 32.

    The original assigned the `(w, h)` unpacking results to a single mangled
    name, leaving `w` and `h` undefined; the tuple unpacking is restored.
    """
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)  # HWC -> NCHW
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class _snake_case(DiffusionPipeline):
    """LDM super-resolution pipeline: conditions a UNet on a low-resolution image
    and decodes the denoised latents with a VQ-VAE.

    Restores the original: the base class and every argument were mangled
    (`__SCREAMING_SNAKE_CASE`, `_a`, duplicate `UpperCamelCase` parameters).
    """

    def __init__(self, vqvae, unet, scheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image=None,
        batch_size=1,
        num_inference_steps=100,
        eta=0.0,
        generator=None,
        output_type="pil",
        return_dict=True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")
        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)
        height, width = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype
        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        image = image.to(device=self.device, dtype=latents_dtype)
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta
        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample
        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
# ---- corpus artifact (file-boundary marker) ----
def lowercase_(p: int) -> bool:
    """Lucas–Lehmer primality test for the Mersenne number 2**p - 1.

    The original declared its parameter with a mangled name while the body
    used ``p``; the parameter name is restored.

    :param p: exponent of the Mersenne candidate (meaningful for prime p)
    :raises ValueError: if p < 2
    :return: True iff 2**p - 1 is prime
    """
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True  # 2**2 - 1 = 3 is prime
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
    # The Lucas-Lehmer test above is (mangled-)named `lowercase_`; the original
    # called the nonexistent `lucas_lehmer_test`, which raised NameError.
    print(lowercase_(7))
    print(lowercase_(1_1))
# ---- corpus artifact (file-boundary marker) ----
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def lowercase_(args: Namespace):
    """Factory for the `transformers-cli convert` command.

    Returns a ConvertCommand configured from parsed CLI arguments. The original
    declared a mangled parameter name while the body read ``args``.
    """
    # NOTE(review): `ConvertCommand` is the upstream name of the class defined
    # below (mangled to `_snake_case` in this file) — confirm it resolves.
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )
__UpperCAmelCase : int = "\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n"
class _snake_case(BaseTransformersCLICommand):
    """`transformers-cli convert`: converts an original (mostly TensorFlow)
    checkpoint of a supported architecture to a Transformers PyTorch checkpoint.

    Restores the original class: the base class, `__init__` parameters (all
    duplicated as `UpperCamelCase` — a SyntaxError), the `self._*` attribute
    assignments, and the `a_` placeholders used for argparse arguments and the
    import-error message.
    """

    # Raised when the per-model conversion helpers cannot be imported
    # (conversion requires TensorFlow to be installed).
    _IMPORT_ERROR_MESSAGE = "\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n"

    @staticmethod
    def lowerCAmelCase_(parser):
        """Register the `convert` subcommand and its arguments on `parser`."""
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        # NOTE(review): upstream wires the factory defined earlier in this file
        # (mangled name); confirm `convert_command_factory` resolves here.
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, model_type, tf_checkpoint, pytorch_dump_output, config, finetuning_task_name, *args):
        self._logger = logging.get_logger("transformers-cli/converting")
        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def lowerCAmelCase_(self):
        """Dispatch to the model-type-specific TF->PyTorch conversion script."""
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(self._IMPORT_ERROR_MESSAGE)
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(self._IMPORT_ERROR_MESSAGE)
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(self._IMPORT_ERROR_MESSAGE)
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "t5":
            try:
                from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(self._IMPORT_ERROR_MESSAGE)
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "gpt":
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )

            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(self._IMPORT_ERROR_MESSAGE)
            # A .ckpt path is a raw TF checkpoint; anything else is a dataset file.
            if "ckpt" in self._tf_checkpoint.lower():
                tf_checkpoint = self._tf_checkpoint
                tf_dataset_file = ""
            else:
                tf_dataset_file = self._tf_checkpoint
                tf_checkpoint = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                tf_checkpoint, self._config, self._pytorch_dump_output, tf_dataset_file
            )
        elif self._model_type == "gpt2":
            try:
                from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
                    convert_gpta_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(self._IMPORT_ERROR_MESSAGE)
            convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(self._IMPORT_ERROR_MESSAGE)
            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
            )
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )

            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )

            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "rembert":
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
                convert_rembert_tf_checkpoint_to_pytorch,
            )

            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        else:
            raise ValueError(
                "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]"
            )
# ---- corpus artifact (file-boundary marker) ----
from typing import Any
def lowercase_(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """Viterbi algorithm: most likely hidden-state sequence for the observations.

    Restores the original: the five parameters all shared the mangled name
    `__snake_case` (a SyntaxError) and every assignment into `probabilities`/
    `pointers` had been redirected to a throwaway local.
    """
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
    # The final observation
    final_observation = observations_space[len(observations_space) - 1]
    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
def lowercase_(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Validate all Viterbi inputs: non-empty, lists of strings, proper dicts.

    Restores the five distinct parameter names (the mangled version declared
    them all as `__snake_case`, which is a SyntaxError).
    """
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)
def lowercase_(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Raise ValueError if any of the five Viterbi inputs is empty/falsy."""
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")
def lowercase_(observations_space: Any, states_space: Any) -> None:
    """Validate that the observation and state sequences are lists of strings."""
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")
def lowercase_(_object: Any, var_name: str) -> None:
    """Raise ValueError unless `_object` is a list of strings.

    `var_name` is used only to build the error message.
    """
    if not isinstance(_object, list):
        msg = f"{var_name} must be a list"
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg)
def lowercase_(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Validate the three probability tables: a flat float dict plus two nested dicts."""
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")
def lowercase_(_object: Any, var_name: str) -> None:
    """Validate a dict-of-dicts whose inner values are floats."""
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)
def lowercase_(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    """Raise ValueError unless `_object` is a dict with string keys and
    `value_type` values; `nested` only adjusts the error message."""
    if not isinstance(_object, dict):
        msg = f"{var_name} must be a dict"
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"{var_name} all keys must be strings"
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg)
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
# ---- corpus artifact (file-boundary marker) ----
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase : Any = logging.get_logger(__name__)
__UpperCAmelCase : Tuple = torch.device("cpu")
def lowercase_():
    """Download and return the standard COCO test image (two cats).

    The original referenced the undefined placeholder `__snake_case` inside a
    zero-argument function; the URL variable and `stream=True` are restored.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def lowercase_(swiftformer_name):
    """Return the reference logits[0, :5] for each SwiftFormer variant, used to
    verify a converted checkpoint. Parameter name restored (body used
    `swiftformer_name` while the signature declared a mangled name)."""
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1_703e00, 2.1_107e00, -2.0_811e00, 8.8_685e-01, 2.4_360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9_636e-01, 2.3_478e-01, -1.6_963e00, -1.7_381e00, -8.6_337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2_768e-01, -4.7_429e-01, -1.0_897e00, -1.0_248e00, 3.5_523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5_330e-01, 2.4_211e-01, -6.0_185e-01, -8.2_789e-01, -6.0_446e-02])
def lowercase_(dct, old, new):
    """Move the value stored under key `old` to key `new` in `dct` (in place).

    The original popped into a throwaway local and never stored under the new
    key; the assignment is restored.
    """
    val = dct.pop(old)
    dct[new] = val
def lowercase_(state_dict):
    """Build (old_key, new_key) pairs mapping original SwiftFormer checkpoint
    names to HF `swiftformer.*` names.

    Mangled locals restored: `rename_keys` accumulator and the per-key `k_new`.
    """
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                # network.<stage>.<block>.rest -> ...network.<stage>.blocks.<block>.rest
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def lowercase_(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    """Convert an original SwiftFormer checkpoint to a HF checkpoint, verify its
    logits against reference values, and save it.

    Restores the mangled parameter names and the `config.*` / local assignments
    that had all been redirected to a throwaway variable.
    """
    config = SwiftFormerConfig()
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]
    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)
    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)
    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")
    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits
    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # The original bound the parser to a mangled module name and then used
    # `parser`/`args`/`convert_swiftformer_checkpoint`, all undefined; the
    # consistent names are restored (the conversion function above is
    # mangled-named `lowercase_`).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swiftformer_name",
        default="swiftformer_xs",
        choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
        type=str,
        help="Name of the SwiftFormer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="./converted_outputs/",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")
    args = parser.parse_args()
    lowercase_(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
# ---- corpus artifact (file-boundary marker) ----
def lowercase_(txt: str) -> list:
    """Return every variant of `txt` with exactly one alphabetic character
    upper-cased (non-alphabetic positions are skipped).

    The body referenced `txt` while the signature declared a mangled name; the
    parameter name is restored.
    """
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
# ---- corpus artifact (file-boundary marker) ----
def lowercase_(graph):
    """Check whether `graph` (adjacency-list dict keyed 0..n-1) is bipartite,
    by 2-coloring each connected component with DFS.

    Restores the mangled assignments: `visited`/`color` initialization and the
    per-vertex updates inside `dfs` had been redirected to a throwaway local.
    """
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    # Color every component, then verify no edge joins same-colored vertices.
    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
# (the original bound the dict to a mangled name and then called the
# nonexistent `check_bipartite_dfs`; the bipartite checker above is
# mangled-named `lowercase_`)
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(lowercase_(graph))
# ---- corpus artifact (file-boundary marker) ----
def lowercase_(n: int = 1000) -> int:
    """Project Euler #1: sum of all natural numbers below `n` that are
    multiples of 3 or 5.

    Fixes: the parameter was declared with a mangled name while the body read
    `n` (NameError), and the `elif a % 15 == 0` branch was unreachable (any
    multiple of 15 is already a multiple of 3) and has been removed.
    """
    result = 0
    for a in range(3, n):
        if a % 3 == 0 or a % 5 == 0:
            result += a
    return result
if __name__ == "__main__":
    # The Euler-#1 solver above is mangled-named `lowercase_`; the original
    # referenced the nonexistent `solution`, which raised NameError.
    print(f"{lowercase_() = }")
# ---- corpus artifact (file-boundary marker) ----
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
__UpperCAmelCase : List[Any] = logging.getLogger(__name__)
class _snake_case ( a__ ):
_A = """summarization"""
_A = ["""loss"""]
_A = ROUGE_KEYS
_A = """rouge2"""
def __init__( self ,UpperCamelCase ,**UpperCamelCase ) -> Tuple:
if hparams.sortish_sampler and hparams.gpus > 1:
snake_case__ :Union[str, Any] = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training" )
if hparams.sortish_sampler:
raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously" )
super().__init__(lowerCAmelCase__ ,num_labels=lowerCAmelCase__ ,mode=self.mode ,**lowerCAmelCase__ )
use_task_specific_params(self.model ,"summarization" )
save_git_info(self.hparams.output_dir )
snake_case__ :Optional[Any] = Path(self.output_dir ) / "metrics.json"
snake_case__ :int = Path(self.output_dir ) / "hparams.pkl"
pickle_save(self.hparams ,self.hparams_save_path )
snake_case__ :List[Any] = 0
snake_case__ :Union[str, Any] = defaultdict(lowerCAmelCase__ )
snake_case__ :Any = self.config.model_type
snake_case__ :str = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size
snake_case__ :dict = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
snake_case__ :Tuple = {
"train": self.hparams.n_train,
"val": self.hparams.n_val,
"test": self.hparams.n_test,
}
snake_case__ :Optional[Any] = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
snake_case__ :str = {
"train": self.hparams.max_target_length,
"val": self.hparams.val_max_target_length,
"test": self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], f'target_lens: {self.target_lens}'
assert self.target_lens["train"] <= self.target_lens["test"], f'target_lens: {self.target_lens}'
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
snake_case__ :Optional[Any] = get_git_info()["repo_sha"]
snake_case__ :List[Any] = hparams.num_workers
snake_case__ :Union[str, Any] = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer ,lowerCAmelCase__ ):
snake_case__ :Optional[int] = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
snake_case__ :Optional[Any] = self.decoder_start_token_id
snake_case__ :Union[str, Any] = (
SeqaSeqDataset if hasattr(self.tokenizer ,"prepare_seq2seq_batch" ) else LegacySeqaSeqDataset
)
snake_case__ :Optional[Any] = False
snake_case__ :int = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
snake_case__ :Any = self.hparams.eval_max_gen_length
else:
snake_case__ :Optional[int] = self.model.config.max_length
snake_case__ :Optional[Any] = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def lowerCAmelCase_ ( self ,UpperCamelCase ) -> Dict[str, List[str]]:
snake_case__ :str = {
k: self.tokenizer.batch_decode(v.tolist() ) if "mask" not in k else v.shape for k, v in batch.items()
}
save_json(lowerCAmelCase__ ,Path(self.output_dir ) / "text_batch.json" )
save_json({k: v.tolist() for k, v in batch.items()} ,Path(self.output_dir ) / "tok_batch.json" )
snake_case__ :Any = True
return readable_batch
def lowerCAmelCase_ ( self ,UpperCamelCase ,**UpperCamelCase ) -> int:
return self.model(lowerCAmelCase__ ,**lowerCAmelCase__ )
def lowerCAmelCase_ ( self ,UpperCamelCase ) -> int:
snake_case__ :str = self.tokenizer.batch_decode(
lowerCAmelCase__ ,skip_special_tokens=lowerCAmelCase__ ,clean_up_tokenization_spaces=lowerCAmelCase__ )
return lmap(str.strip ,lowerCAmelCase__ )
def lowerCAmelCase_ ( self ,UpperCamelCase ) -> Tuple:
snake_case__ :List[Any] = self.tokenizer.pad_token_id
snake_case__ :Optional[int] = batch["input_ids"], batch["attention_mask"]
snake_case__ :Tuple = batch["labels"]
if isinstance(self.model ,lowerCAmelCase__ ):
snake_case__ :Optional[int] = self.model._shift_right(lowerCAmelCase__ )
else:
snake_case__ :Optional[int] = shift_tokens_right(lowerCAmelCase__ ,lowerCAmelCase__ )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
snake_case__ :List[str] = decoder_input_ids
self.save_readable_batch(lowerCAmelCase__ )
snake_case__ :Any = self(lowerCAmelCase__ ,attention_mask=lowerCAmelCase__ ,decoder_input_ids=lowerCAmelCase__ ,use_cache=lowerCAmelCase__ )
snake_case__ :Any = outputs["logits"]
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
snake_case__ :List[str] = nn.CrossEntropyLoss(ignore_index=lowerCAmelCase__ )
assert lm_logits.shape[-1] == self.vocab_size
snake_case__ :Optional[int] = ce_loss_fct(lm_logits.view(-1 ,lm_logits.shape[-1] ) ,tgt_ids.view(-1 ) )
else:
snake_case__ :Tuple = nn.functional.log_softmax(lowerCAmelCase__ ,dim=-1 )
snake_case__ :Any = label_smoothed_nll_loss(
lowerCAmelCase__ ,lowerCAmelCase__ ,self.hparams.label_smoothing ,ignore_index=lowerCAmelCase__ )
return (loss,)
@property
def lowerCAmelCase_ ( self ) -> int:
return self.tokenizer.pad_token_id
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> Dict:
snake_case__ :Optional[int] = self._step(lowerCAmelCase__ )
snake_case__ :Dict = dict(zip(self.loss_names ,lowerCAmelCase__ ) )
# tokens per batch
snake_case__ :List[Any] = batch["input_ids"].ne(self.pad ).sum() + batch["labels"].ne(self.pad ).sum()
snake_case__ :List[Any] = batch["input_ids"].shape[0]
snake_case__ :Any = batch["input_ids"].eq(self.pad ).sum()
snake_case__ :str = batch["input_ids"].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> Dict:
return self._generative_step(lowerCAmelCase__ )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase="val" ) -> Dict:
self.step_count += 1
snake_case__ :List[str] = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
snake_case__ :int = losses["loss"]
snake_case__ :Any = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["gen_time", "gen_len"]
}
snake_case__ :Optional[Any] = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
snake_case__ :torch.FloatTensor = torch.tensor(lowerCAmelCase__ ).type_as(lowerCAmelCase__ )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(lowerCAmelCase__ )
snake_case__ :Optional[int] = {f'{prefix}_avg_{k}': x for k, x in losses.items()}
snake_case__ :List[Any] = self.step_count
self.metrics[prefix].append(lowerCAmelCase__ ) # callback writes this to self.metrics_save_path
snake_case__ :Optional[int] = flatten_list([x["preds"] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
f'{prefix}_loss': loss,
f'{prefix}_{self.val_metric}': metric_tensor,
}
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> Dict:
return calculate_rouge(lowerCAmelCase__ ,lowerCAmelCase__ )
def lowerCAmelCase_ ( self ,UpperCamelCase ) -> dict:
snake_case__ :Any = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
snake_case__ :Optional[int] = self.model.generate(
batch["input_ids"] ,attention_mask=batch["attention_mask"] ,use_cache=lowerCAmelCase__ ,decoder_start_token_id=self.decoder_start_token_id ,num_beams=self.eval_beams ,max_length=self.eval_max_length ,)
snake_case__ :str = (time.time() - ta) / batch["input_ids"].shape[0]
snake_case__ :List[str] = self.ids_to_clean_text(lowerCAmelCase__ )
snake_case__ :List[str] = self.ids_to_clean_text(batch["labels"] )
snake_case__ :int = self._step(lowerCAmelCase__ )
snake_case__ :int = dict(zip(self.loss_names ,lowerCAmelCase__ ) )
snake_case__ :Dict = self.calc_generative_metrics(lowerCAmelCase__ ,lowerCAmelCase__ )
snake_case__ :Optional[int] = np.mean(lmap(lowerCAmelCase__ ,lowerCAmelCase__ ) )
base_metrics.update(gen_time=lowerCAmelCase__ ,gen_len=lowerCAmelCase__ ,preds=lowerCAmelCase__ ,target=lowerCAmelCase__ ,**lowerCAmelCase__ )
return base_metrics
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> List[Any]:
return self._generative_step(lowerCAmelCase__ )
def lowerCAmelCase_ ( self ,UpperCamelCase ) -> Optional[Any]:
return self.validation_epoch_end(lowerCAmelCase__ ,prefix="test" )
def lowerCAmelCase_ ( self ,UpperCamelCase ) -> SeqaSeqDataset:
snake_case__ :Union[str, Any] = self.n_obs[type_path]
snake_case__ :List[str] = self.target_lens[type_path]
snake_case__ :Any = self.dataset_class(
self.tokenizer ,type_path=lowerCAmelCase__ ,n_obs=lowerCAmelCase__ ,max_target_length=lowerCAmelCase__ ,**self.dataset_kwargs ,)
return dataset
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = False ) -> DataLoader:
snake_case__ :Tuple = self.get_dataset(lowerCAmelCase__ )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
snake_case__ :int = dataset.make_sortish_sampler(lowerCAmelCase__ ,distributed=self.hparams.gpus > 1 )
return DataLoader(
lowerCAmelCase__ ,batch_size=lowerCAmelCase__ ,collate_fn=dataset.collate_fn ,shuffle=lowerCAmelCase__ ,num_workers=self.num_workers ,sampler=lowerCAmelCase__ ,)
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
snake_case__ :Union[str, Any] = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch ,distributed=self.hparams.gpus > 1 )
return DataLoader(
lowerCAmelCase__ ,batch_sampler=lowerCAmelCase__ ,collate_fn=dataset.collate_fn ,num_workers=self.num_workers ,)
else:
return DataLoader(
lowerCAmelCase__ ,batch_size=lowerCAmelCase__ ,collate_fn=dataset.collate_fn ,shuffle=lowerCAmelCase__ ,num_workers=self.num_workers ,sampler=lowerCAmelCase__ ,)
def lowerCAmelCase_ ( self ) -> DataLoader:
snake_case__ :List[str] = self.get_dataloader("train" ,batch_size=self.hparams.train_batch_size ,shuffle=lowerCAmelCase__ )
return dataloader
def lowerCAmelCase_ ( self ) -> DataLoader:
return self.get_dataloader("val" ,batch_size=self.hparams.eval_batch_size )
def lowerCAmelCase_ ( self ) -> DataLoader:
return self.get_dataloader("test" ,batch_size=self.hparams.eval_batch_size )
@staticmethod
def lowerCAmelCase_ ( UpperCamelCase ,UpperCamelCase ) -> Optional[int]:
BaseTransformer.add_model_specific_args(lowerCAmelCase__ ,lowerCAmelCase__ )
add_generic_args(lowerCAmelCase__ ,lowerCAmelCase__ )
parser.add_argument(
"--max_source_length" ,default=1_024 ,type=lowerCAmelCase__ ,help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) ,)
parser.add_argument(
"--max_target_length" ,default=56 ,type=lowerCAmelCase__ ,help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) ,)
parser.add_argument(
"--val_max_target_length" ,default=142 ,type=lowerCAmelCase__ ,help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) ,)
parser.add_argument(
"--test_max_target_length" ,default=142 ,type=lowerCAmelCase__ ,help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) ,)
parser.add_argument("--freeze_encoder" ,action="store_true" )
parser.add_argument("--freeze_embeds" ,action="store_true" )
parser.add_argument("--sortish_sampler" ,action="store_true" ,default=lowerCAmelCase__ )
parser.add_argument("--overwrite_output_dir" ,action="store_true" ,default=lowerCAmelCase__ )
parser.add_argument("--max_tokens_per_batch" ,type=lowerCAmelCase__ ,default=lowerCAmelCase__ )
parser.add_argument("--logger_name" ,type=lowerCAmelCase__ ,choices=["default", "wandb", "wandb_shared"] ,default="default" )
parser.add_argument("--n_train" ,type=lowerCAmelCase__ ,default=-1 ,required=lowerCAmelCase__ ,help="# examples. -1 means use all." )
parser.add_argument("--n_val" ,type=lowerCAmelCase__ ,default=500 ,required=lowerCAmelCase__ ,help="# examples. -1 means use all." )
parser.add_argument("--n_test" ,type=lowerCAmelCase__ ,default=-1 ,required=lowerCAmelCase__ ,help="# examples. -1 means use all." )
parser.add_argument(
"--task" ,type=lowerCAmelCase__ ,default="summarization" ,required=lowerCAmelCase__ ,help="# examples. -1 means use all." )
parser.add_argument("--label_smoothing" ,type=lowerCAmelCase__ ,default=0.0 ,required=lowerCAmelCase__ )
parser.add_argument("--src_lang" ,type=lowerCAmelCase__ ,default="" ,required=lowerCAmelCase__ )
parser.add_argument("--tgt_lang" ,type=lowerCAmelCase__ ,default="" ,required=lowerCAmelCase__ )
parser.add_argument("--eval_beams" ,type=lowerCAmelCase__ ,default=lowerCAmelCase__ ,required=lowerCAmelCase__ )
parser.add_argument(
"--val_metric" ,type=lowerCAmelCase__ ,default=lowerCAmelCase__ ,required=lowerCAmelCase__ ,choices=["bleu", "rouge2", "loss", None] )
parser.add_argument("--eval_max_gen_length" ,type=lowerCAmelCase__ ,default=lowerCAmelCase__ ,help="never generate more than n tokens" )
parser.add_argument("--save_top_k" ,type=lowerCAmelCase__ ,default=1 ,required=lowerCAmelCase__ ,help="How many checkpoints to save" )
parser.add_argument(
"--early_stopping_patience" ,type=lowerCAmelCase__ ,default=-1 ,required=lowerCAmelCase__ ,help=(
"-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
" val_check_interval will effect it."
) ,)
return parser
class _snake_case ( a__ ):
    # NOTE(review): these four class attributes were all collapsed to `_A` by
    # obfuscation (only the last binding survived). Names restored from what the
    # base-class methods read: self.loss_names, self.metric_names,
    # self.default_val_metric — confirm `mode` against the base template.
    mode = """translation"""
    loss_names = ["""loss"""]
    metric_names = ["""bleu"""]
    default_val_metric = """bleu"""

    def __init__( self ,hparams ,**kwargs ) -> None:
        """Translation variant of the seq2seq module: same training loop, BLEU metric.

        Fixes: duplicate parameter names (SyntaxError) and the two language
        assignments whose targets were destroyed by obfuscation.
        """
        super().__init__(hparams ,**kwargs )
        # NOTE(review): targets reconstructed — upstream stores the language pair
        # in self.dataset_kwargs (consumed via **self.dataset_kwargs in
        # get_dataset); confirm against the base-class __init__.
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def lowerCAmelCase_ ( self ,preds ,target ) -> dict:
        """Score generations with BLEU instead of ROUGE."""
        return calculate_bleu(preds ,target )
def lowercase_ ( args , model=None ) -> SummarizationModule:
    """Train (and optionally test) a summarization/translation module from CLI args.

    Fixes: two identically-named parameters (SyntaxError), the undefined
    ``_lowercase`` placeholder throughout, and the lost attribute assignments
    (``model.hparams.test_checkpoint`` / ``trainer.resume_from_checkpoint``).
    """
    Path(args.output_dir ).mkdir(exist_ok=True )
    check_output_dir(args , expected_items=3 )
    if model is None:
        if "summarization" in args.task:
            model: SummarizationModule = SummarizationModule(args )
        else:
            model: SummarizationModule = TranslationModule(args )
    dataset = Path(args.data_dir ).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir ).startswith("/tmp" )
        or str(args.output_dir ).startswith("/var" )
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT" , dataset )
        logger = WandbLogger(name=model.output_dir.name , project=project )
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name , project=F'hf_{dataset}' )
    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
    else:
        es_callback = False
    lower_is_better = args.val_metric == "loss"
    trainer: pl.Trainer = generic_train(
        model , args , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
            args.output_dir , model.val_metric , args.save_top_k , lower_is_better ) , early_stopping_callback=es_callback , logger=logger , )
    pickle_save(model.hparams , model.output_dir / "hparams.pkl" )
    if not args.do_predict:
        return model
    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir , "*.ckpt" ) , recursive=True ) )
    if checkpoints:
        # NOTE(review): resume target reconstructed from upstream — confirm that
        # the second assignment is trainer.resume_from_checkpoint.
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams )
    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
    # Compose the CLI (Lightning Trainer args + model-specific args), then train.
    # Fixes: `parser`, `args` and `main` were undefined — the entry point in this
    # file is `lowercase_`.
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    lowercase_(args)
# --- chunk boundary (stray concatenation marker removed) ---
import os
import sys
import unittest
# Locate the repository root three levels up and make `utils/` importable.
# Fixes: `git_repo_path` was referenced on the next line but the assignment
# target had been obfuscated away.
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402

# Align TRANSFORMERS_PATH in check_dummies with the current path
# NOTE(review): the assignment target was destroyed by obfuscation; restored
# from the upstream diffusers test — confirm the attribute name.
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")
class _snake_case ( unittest.TestCase ):
    """Tests for the `check_dummies` utility (backend detection and dummy-object
    generation).

    Fixes: result variables were obfuscated to bare ``snake_case__`` assignments
    while later lines referenced undefined names (``UpperCamelCase``,
    ``objects``, ``dummy_files``); names reunified.
    """

    def lowerCAmelCase_ ( self ) -> Union[str, Any]:
        # Single backend.
        simple_backend = find_backend(" if not is_torch_available():" )
        self.assertEqual(simple_backend ,"torch" )
        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")
        double_backend = find_backend(" if not (is_torch_available() and is_transformers_available()):" )
        self.assertEqual(double_backend ,"torch_and_transformers" )
        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        triple_backend = find_backend(
            " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):" )
        self.assertEqual(triple_backend ,"torch_and_transformers_and_onnx" )

    def lowerCAmelCase_ ( self ) -> str:
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch" ,objects )
        self.assertIn("torch_and_transformers" ,objects )
        self.assertIn("flax_and_transformers" ,objects )
        self.assertIn("torch_and_transformers_and_onnx" ,objects )
        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel" ,objects["torch"] )
        self.assertIn("FlaxUNet2DConditionModel" ,objects["flax"] )
        self.assertIn("StableDiffusionPipeline" ,objects["torch_and_transformers"] )
        self.assertIn("FlaxStableDiffusionPipeline" ,objects["flax_and_transformers"] )
        self.assertIn("LMSDiscreteScheduler" ,objects["torch_and_scipy"] )
        self.assertIn("OnnxStableDiffusionPipeline" ,objects["torch_and_transformers_and_onnx"] )

    def lowerCAmelCase_ ( self ) -> Any:
        dummy_constant = create_dummy_object("CONSTANT" ,"'torch'" )
        self.assertEqual(dummy_constant ,"\nCONSTANT = None\n" )
        dummy_function = create_dummy_object("function" ,"'torch'" )
        self.assertEqual(
            dummy_function ,"\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n" )
        expected_dummy_class = "\nclass FakeClass(metaclass=DummyObject):\n    _backends = 'torch'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, 'torch')\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, 'torch')\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, 'torch')\n"
        dummy_class = create_dummy_object("FakeClass" ,"'torch'" )
        self.assertEqual(dummy_class ,expected_dummy_class )

    def lowerCAmelCase_ ( self ) -> List[Any]:
        expected_dummy_pytorch_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = [\"torch\"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, [\"torch\"])\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, [\"torch\"])\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, [\"torch\"])\n"
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} )
        self.assertEqual(dummy_files["torch"] ,expected_dummy_pytorch_file )
# --- chunk boundary (stray concatenation markers removed) ---
# Digit lookup table for base-16 conversion (0-15 -> '0'-'f').
# Fixes: the converter below indexes `values[remainder]`, but the dict's name
# had been obfuscated to `__UpperCAmelCase`, producing a NameError.
values = {
    0: "0",
    1: "1",
    2: "2",
    3: "3",
    4: "4",
    5: "5",
    6: "6",
    7: "7",
    8: "8",
    9: "9",
    10: "a",
    11: "b",
    12: "c",
    13: "d",
    14: "e",
    15: "f",
}
# Backward-compatible alias for the original (obfuscated) module-level name.
__UpperCAmelCase = values


def lowercase_ ( __snake_case : float ) -> str:
    """Convert an integral number to its hexadecimal string representation.

    Accepts an int or an integral float; negative inputs get a leading '-'.
    Fixes: undefined obfuscated names, and the zero input which previously
    produced the invalid literal "0x" instead of "0x0".

    >>> lowercase_(255)
    '0xff'
    >>> lowercase_(-256)
    '-0x100'
    >>> lowercase_(0)
    '0x0'
    """
    assert type(__snake_case ) in (int, float) and __snake_case == int(__snake_case )
    decimal = int(__snake_case )
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal , 16 )
        hexadecimal = values[remainder] + hexadecimal
    if hexadecimal == "":
        # Edge case: zero has no digits produced by the loop above.
        hexadecimal = "0"
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
# Run this module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
# --- chunk boundary (stray concatenation marker removed) ---
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# Lazy-import plumbing for the BARTpho tokenizer package.
# Fixes: the final `_LazyModule(...)` call passes `_import_structure`, but the
# dict was obfuscated to `__UpperCAmelCase`, its conditional entry was lost, and
# the `sys.modules[__name__]` replacement was reduced to a bare assignment.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Tokenizer is only importable when sentencepiece is installed.
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    import sys

    # Replace this module with a lazy proxy so the heavy imports above only
    # happen on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# --- chunk boundary (stray concatenation markers removed) ---
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import plumbing for the Jukebox package.
# Fixes: the final `_LazyModule(...)` call passes `_import_structure`, but the
# dict was obfuscated to `__UpperCAmelCase`, the torch-only entry assignment was
# lost, and the `sys.modules[__name__]` replacement was a bare assignment.
_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling code is only importable when torch is installed.
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]

if TYPE_CHECKING:
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )
else:
    import sys

    # Replace this module with a lazy proxy so the heavy imports above only
    # happen on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# --- chunk boundary (stray concatenation marker removed) ---
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _snake_case ( unittest.TestCase ):
    """Offline-cache and legacy-URL loading behavior of tokenizer `from_pretrained`.

    Fixes: the mock-response attribute assignments were obfuscated into bare
    ``snake_case__`` assignments while later lines referenced undefined names.
    NOTE(review): the five mock fields were restored from the upstream
    transformers test — confirm against `test_tokenization_utils.py`.
    """

    def lowerCAmelCase_ ( self ) -> List[Any]:
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request" ,return_value=response_mock ) as mock_head:
            _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
            # This check we did call the fake head request
            mock_head.assert_called()

    @require_tokenizers
    def lowerCAmelCase_ ( self ) -> Dict:
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = GPTaTokenizerFast.from_pretrained("gpt2" )
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request" ,return_value=response_mock ) as mock_head:
            _ = GPTaTokenizerFast.from_pretrained("gpt2" )
            # This check we did call the fake head request
            mock_head.assert_called()

    def lowerCAmelCase_ ( self ) -> int:
        # This test is for deprecated behavior and can be removed in v5
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file ,"wb" ) as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" ,f )
            _ = AlbertTokenizer.from_pretrained(tmp_file )
        finally:
            os.remove(tmp_file )
        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile("tokenizer.json" ):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open("tokenizer.json" ,"wb" ) as f:
                http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json" ,f )
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size ,1_000 )
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove("tokenizer.json" )

    def lowerCAmelCase_ ( self ) -> Union[str, Any]:
        # This test is for deprecated behavior and can be removed in v5
        _ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" )
@is_staging_test
class _snake_case ( unittest.TestCase ):
    """Push-to-hub round-trips for slow/fast/custom tokenizers (staging Hub).

    Fixes: temporary-directory and tokenizer variables were obfuscated into
    bare assignments while the bodies referenced undefined ``UpperCamelCase``;
    the vocab list attribute (read via ``self.vocab_tokens``) was renamed from
    the collapsed ``_A``; boolean keyword values restored.
    """

    vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']

    @classmethod
    def lowerCAmelCase_ ( cls ) -> Optional[int]:
        # Authenticate against the staging endpoint once for the whole class.
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )

    @classmethod
    def lowerCAmelCase_ ( cls ) -> Union[str, Any]:
        # Best-effort cleanup of any repos left over from previous runs.
        try:
            delete_repo(token=cls._token ,repo_id="test-tokenizer" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token ,repo_id="valid_org/test-tokenizer-org" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token ,repo_id="test-dynamic-tokenizer" )
        except HTTPError:
            pass

    def lowerCAmelCase_ ( self ) -> Optional[Any]:
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir ,"vocab.txt" )
            with open(vocab_file ,"w" ,encoding="utf-8" ) as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
            tokenizer = BertTokenizer(vocab_file )
        tokenizer.push_to_hub("test-tokenizer" ,use_auth_token=self._token )
        new_tokenizer = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
        self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
        # Reset repo
        delete_repo(token=self._token ,repo_id="test-tokenizer" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir ,repo_id="test-tokenizer" ,push_to_hub=True ,use_auth_token=self._token )
        new_tokenizer = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
        self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )

    def lowerCAmelCase_ ( self ) -> Optional[int]:
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir ,"vocab.txt" )
            with open(vocab_file ,"w" ,encoding="utf-8" ) as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
            tokenizer = BertTokenizer(vocab_file )
        tokenizer.push_to_hub("valid_org/test-tokenizer-org" ,use_auth_token=self._token )
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
        self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
        # Reset repo
        delete_repo(token=self._token ,repo_id="valid_org/test-tokenizer-org" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir ,repo_id="valid_org/test-tokenizer-org" ,push_to_hub=True ,use_auth_token=self._token )
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
        self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )

    @require_tokenizers
    def lowerCAmelCase_ ( self ) -> Any:
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir ,"vocab.txt" )
            with open(vocab_file ,"w" ,encoding="utf-8" ) as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
            tokenizer = CustomTokenizer(vocab_file )
        # No fast custom tokenizer
        tokenizer.push_to_hub("test-dynamic-tokenizer" ,use_auth_token=self._token )
        tokenizer = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=True )
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__ ,"CustomTokenizer" )
        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir ,"vocab.txt" )
            with open(vocab_file ,"w" ,encoding="utf-8" ) as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir )
            bert_tokenizer.save_pretrained(tmp_dir )
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir )
        tokenizer.push_to_hub("test-dynamic-tokenizer" ,use_auth_token=self._token )
        tokenizer = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=True )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__ ,"CustomTokenizerFast" )
        tokenizer = AutoTokenizer.from_pretrained(
            f'{USER}/test-dynamic-tokenizer' ,use_fast=False ,trust_remote_code=True )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__ ,"CustomTokenizer" )
class _snake_case ( unittest.TestCase ):
    """Unit tests for the `Trie` used by the tokenizers' special-token splitter.

    Fixes: local trie/result variables were obfuscated to bare assignments and
    the final assertion referenced the undefined name ``UpperCamelCase``.
    """

    def lowerCAmelCase_ ( self ) -> List[Any]:
        trie = Trie()
        trie.add("Hello 友達" )
        self.assertEqual(trie.data ,{"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}} )
        trie.add("Hello" )
        trie.data
        self.assertEqual(trie.data ,{"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}} )

    def lowerCAmelCase_ ( self ) -> int:
        trie = Trie()
        # With an empty trie the text comes back whole.
        self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) ,["[CLS] This is a extra_id_100"] )
        trie.add("[CLS]" )
        trie.add("extra_id_1" )
        trie.add("extra_id_100" )
        self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) ,["[CLS]", " This is a ", "extra_id_100"] )

    def lowerCAmelCase_ ( self ) -> str:
        trie = Trie()
        trie.add("A" )
        self.assertEqual(trie.split("ABC" ) ,["A", "BC"] )
        self.assertEqual(trie.split("BCA" ) ,["BC", "A"] )

    def lowerCAmelCase_ ( self ) -> Dict:
        trie = Trie()
        trie.add("TOKEN]" )
        trie.add("[SPECIAL_TOKEN]" )
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) ,["This is something ", "[SPECIAL_TOKEN]"] )

    def lowerCAmelCase_ ( self ) -> Tuple:
        trie = Trie()
        trie.add("A" )
        trie.add("P" )
        trie.add("[SPECIAL_TOKEN]" )
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) ,["This is something ", "[SPECIAL_TOKEN]"] )

    def lowerCAmelCase_ ( self ) -> Tuple:
        trie = Trie()
        trie.add("AB" )
        trie.add("B" )
        trie.add("C" )
        self.assertEqual(trie.split("ABC" ) ,["AB", "C"] )

    def lowerCAmelCase_ ( self ) -> Union[str, Any]:
        trie = Trie()
        trie.add("ABC" )
        trie.add("B" )
        trie.add("CD" )
        self.assertEqual(trie.split("ABCD" ) ,["ABC", "D"] )

    def lowerCAmelCase_ ( self ) -> int:
        # Even if the offsets are wrong, we necessarily output correct string
        # parts.
        trie = Trie()
        parts = trie.cut_text("ABC" ,[0, 0, 2, 1, 2, 3] )
        self.assertEqual(parts ,["AB", "C"] )
# --- chunk boundary (stray concatenation markers removed) ---
from math import factorial
class _snake_case :
def __init__( self ,UpperCamelCase ,UpperCamelCase ) -> Optional[int]:
snake_case__ :str = real
if isinstance(UpperCamelCase__ ,UpperCamelCase__ ):
snake_case__ :Dict = [1] * rank
else:
snake_case__ :Optional[int] = rank
def __repr__( self ) -> List[Any]:
return (
f'{self.real}+'
f'{"+".join(str(UpperCamelCase__ )+"E"+str(n+1 )for n,dual in enumerate(self.duals ) )}'
)
def lowerCAmelCase_ ( self ) -> int:
snake_case__ :List[str] = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real ,UpperCamelCase__ )
def __add__( self ,UpperCamelCase ) -> Dict:
if not isinstance(UpperCamelCase__ ,UpperCamelCase__ ):
return Dual(self.real + other ,self.duals )
snake_case__ :List[Any] = self.duals.copy()
snake_case__ :List[str] = other.duals.copy()
if len(UpperCamelCase__ ) > len(UpperCamelCase__ ):
o_dual.extend([1] * (len(UpperCamelCase__ ) - len(UpperCamelCase__ )) )
elif len(UpperCamelCase__ ) < len(UpperCamelCase__ ):
s_dual.extend([1] * (len(UpperCamelCase__ ) - len(UpperCamelCase__ )) )
snake_case__ :Dict = []
for i in range(len(UpperCamelCase__ ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real ,UpperCamelCase__ )
_A = __add__
def __sub__( self ,UpperCamelCase ) -> Any:
return self + other * -1
def __mul__( self ,UpperCamelCase ) -> List[Any]:
if not isinstance(UpperCamelCase__ ,UpperCamelCase__ ):
snake_case__ :Union[str, Any] = []
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other ,UpperCamelCase__ )
snake_case__ :Optional[int] = [0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real ,UpperCamelCase__ )
_A = __mul__
def __truediv__( self ,UpperCamelCase ) -> int:
if not isinstance(UpperCamelCase__ ,UpperCamelCase__ ):
snake_case__ :Any = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other ,UpperCamelCase__ )
raise ValueError
def __floordiv__( self ,UpperCamelCase ) -> Union[str, Any]:
if not isinstance(UpperCamelCase__ ,UpperCamelCase__ ):
snake_case__ :Optional[Any] = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other ,UpperCamelCase__ )
raise ValueError
def __pow__( self ,UpperCamelCase ) -> List[str]:
if n < 0 or isinstance(UpperCamelCase__ ,UpperCamelCase__ ):
raise ValueError("power must be a positive integer" )
if n == 0:
return 1
if n == 1:
return self
snake_case__ :str = self
for _ in range(n - 1 ):
x *= self
return x
def lowercase_(func, position, order):
    '''Return the ``order``-th derivative of ``func`` at ``position``.

    ``func`` must accept a dual number; ``position`` is a float/int;
    ``order`` is a non-negative int (0 returns the plain function value).
    Raises ValueError on invalid argument types.
    '''
    # Fix: the obfuscated signature repeated the same parameter name three
    # times (a SyntaxError) and the body referenced undefined `_lowercase`.
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func" )
    if not isinstance(position, (float, int) ):
        raise ValueError("differentiate() requires a float as input for position" )
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order" )
    # Seed one dual coefficient so products accumulate derivative terms.
    # NOTE(review): `_snake_case` is this module's Dual class — confirm.
    d = _snake_case(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    # The k-th dual coefficient is f^(k)(x)/k!, so rescale by k!.
    return result.duals[order - 1] * factorial(order)
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
def lowercase_(y):
    '''Example target for the differentiation demo: y**2 * y**4 == y**6.'''
    # Fix: the body referenced `y` while the parameter had another name.
    return y**2 * y**4
# NOTE(review): neither `differentiate` nor `f` is defined under these names in
# this module (both defs above were renamed to `lowercase_`), so this call will
# raise NameError — confirm the intended entry points before running.
print(differentiate(f, 9, 2))
| 712
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
# Batch-size constants for the MRPC example.  NOTE(review): both statements
# assign the same mangled name, so the second overwrites the first —
# presumably MAX_GPU_BATCH_SIZE = 16 and EVAL_BATCH_SIZE = 32 in the
# original source; verify before relying on either value.
__UpperCAmelCase : Optional[Any] = 1_6
__UpperCAmelCase : Optional[int] = 3_2
def lowercase_(accelerator, batch_size=16, model_name="bert-base-cased"):
    '''Build train/validation DataLoaders for GLUE MRPC.

    Tokenizes the dataset with ``model_name``'s tokenizer, renames the label
    column, and pads per-batch (fixed-length padding on TPU).  Returns
    ``(train_dataloader, eval_dataloader)``.
    '''
    # Fix: the obfuscated signature repeated one parameter name three times
    # (a SyntaxError) and every local was assigned to the same throwaway name.
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue" , "mrpc" )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=True , max_length=None )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label" , "labels" )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding="max_length" , max_length=1_28 , return_tensors="pt" )
        return tokenizer.pad(examples , padding="longest" , return_tensors="pt" )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
def lowercase_(accelerator, model, eval_dataloader, metric):
    '''Run one evaluation pass and return the accuracy metric.

    Gathers predictions/labels across processes and trims the duplicated tail
    of the final distributed batch so every sample is counted exactly once.
    '''
    # Fix: duplicate parameter names (SyntaxError) and `samples_seen`
    # referenced while its initializer was assigned to a throwaway name.
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device )
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1 )
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"]) )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                references = references[: len(eval_dataloader.dataset ) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions , references=references , )
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def lowercase_(config, args):
    '''Train an MRPC sequence classifier with Accelerate and checkpointing.

    Saves a state checkpoint per epoch; when resuming from a checkpoint it
    verifies the saved accuracy/learning rates/epoch against the restored
    state and returns without further training.
    '''
    # Fix: duplicate parameter names (SyntaxError); the body already reads
    # `config` and `args`, so those are the restored parameter names.
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"] )
    seed = int(config["seed"] )
    batch_size = int(config["batch_size"] )
    model_name = args.model_name_or_path
    set_seed(seed)
    # NOTE(review): the dataloader builder in this module was renamed to
    # `lowercase_`; `get_dataloaders` is the intended target — confirm.
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name , return_dict=True )
    # Instantiate optimizer: let DeepSpeed supply a dummy when it owns the optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() , lr=lr )
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler (dummy when DeepSpeed provides its own)
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue" , "mrpc" )
    ending_epoch = num_epochs
    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch
    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint )
        # Recover the epoch number from the checkpoint directory name "epoch_N".
        epoch_string = args.resume_from_checkpoint.split("epoch_" )[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        # NOTE(review): `evaluation_loop` was also renamed to `lowercase_` in
        # this module — confirm the resolved callee.
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:" , accuracy )
        accelerator.print("resumed checkpoint's scheduler's lr:" , lr_scheduler.get_lr()[0] )
        accelerator.print("resumed optimizers's lr:" , optimizer.param_groups[0]["lr"] )
        with open(os.path.join(args.output_dir , F'state_{starting_epoch-1}.json' ) , "r" ) as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return
    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        output_dir = os.path.join(args.output_dir , F'epoch_{epoch}' )
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["step"] = overall_step
        accelerator.print(F'epoch {epoch}:' , state )
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir , F'state_{epoch}.json' ) , "w" ) as f:
                json.dump(state , f )
def lowercase_():
    '''Parse command-line arguments and launch the training function.'''
    # Fix: the parsed args and config were assigned to the same throwaway
    # name and the final call passed an undefined variable twice.
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
    parser.add_argument(
        "--model_name_or_path" , type=str , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=False , )
    parser.add_argument(
        "--output_dir" , type=str , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
    parser.add_argument(
        "--resume_from_checkpoint" , type=str , default=None , help="If the training should continue from a checkpoint folder." , )
    parser.add_argument(
        "--partial_train_epoch" , type=int , default=None , help="If passed, the training will stop after this number of epochs." , )
    parser.add_argument(
        "--num_epochs" , type=int , default=2 , help="Number of train epochs." , )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    # NOTE(review): the training entry point in this module was renamed to
    # `lowercase_`; `training_function` is the intended target — confirm.
    training_function(config, args)
if __name__ == "__main__":
    # Script entry point.  NOTE(review): `main` is not defined under this name
    # in the obfuscated module (the def above is `lowercase_`) — confirm.
    main()
| 57
| 0
|
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
# Module-level logger for this evaluation script.
logger = getLogger(__name__)
# Default inference device: prefer GPU when available.
# Fix: both constants were assigned to the same mangled name while the code
# below reads `logger` and `DEFAULT_DEVICE`.
DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def lowercase_(examples, out_file, model_name, batch_size=8, device=DEFAULT_DEVICE, fpaa=False, task="summarization", prefix=None, **generate_kwargs) -> Dict:
    '''Run seq2seq generation over ``examples`` and stream outputs to ``out_file``.

    Loads the model/tokenizer from ``model_name``, optionally casts to fp16,
    generates chunk-by-chunk, and returns timing stats:
    {"n_obs", "runtime", "seconds_per_sample"}.
    '''
    # Fix: duplicate parameter names (SyntaxError) and every local assigned
    # to the same throwaway name while the body read the real names.
    fout = Path(out_file).open("w" , encoding="utf-8" )
    model_name = str(model_name)
    model = AutoModelForSeqaSeqLM.from_pretrained(model_name).to(device)
    if fpaa:
        model = model.half()
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(F'Inferred tokenizer type: {tokenizer.__class__}' )  # if this is wrong, check config.model_type.
    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model , task )
    if prefix is None:
        prefix = prefix or getattr(model.config , "prefix" , "" ) or ""
    for examples_chunk in tqdm(list(chunks(examples , batch_size ) ) ):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk , return_tensors="pt" , truncation=True , padding="longest" ).to(device)
        summaries = model.generate(
            input_ids=batch.input_ids , attention_mask=batch.attention_mask , **generate_kwargs , )
        dec = tokenizer.batch_decode(summaries , skip_special_tokens=True , clean_up_tokenization_spaces=False )
        for hypothesis in dec:
            fout.write(hypothesis + "\n" )
        fout.flush()
    fout.close()
    runtime = int(time.time() - start_time )  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def lowercase_() -> Dict:
    '''Current local time rendered as "YYYY-MM-DD HH:MM:SS".'''
    now = datetime.datetime.now()
    return now.strftime("%Y-%m-%d %H:%M:%S" )
def lowercase_(verbose=True):
    '''CLI wrapper: parse args, run generation, optionally score the outputs.

    Returns the metrics dict ({} when no --reference_path was given); also
    writes it to --score_path.  Extra ``--key=value`` flags are forwarded to
    ``model.generate``.
    '''
    # Fix: the body referenced an undefined mangled name throughout, and
    # read `args.fpaa` although argparse defines the attribute `fp16`.
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name" , type=str , help="like facebook/bart-large-cnn,t5-base, etc." )
    parser.add_argument("input_path" , type=str , help="like cnn_dm/test.source" )
    parser.add_argument("save_path" , type=str , help="where to save summaries" )
    parser.add_argument("--reference_path" , type=str , required=False , help="like cnn_dm/test.target" )
    parser.add_argument("--score_path" , type=str , required=False , default="metrics.json" , help="where to save metrics" )
    parser.add_argument("--device" , type=str , required=False , default=None , help="cuda, cuda:1, cpu etc." )
    parser.add_argument(
        "--prefix" , type=str , required=False , default=None , help="will be added to the begininng of src examples" )
    parser.add_argument("--task" , type=str , default="summarization" , help="used for task_specific_params + metrics" )
    parser.add_argument("--bs" , type=int , default=8 , required=False , help="batch size" )
    parser.add_argument(
        "--n_obs" , type=int , default=-1 , required=False , help="How many observations. Defaults to all." )
    parser.add_argument("--fp16" , action="store_true" )
    parser.add_argument("--dump-args" , action="store_true" , help="print the custom hparams with the results" )
    parser.add_argument(
        "--info" , nargs="?" , type=str , const=datetime_now() , help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ) , )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(F'parsed the following generate kwargs: {parsed_args}' )
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path ).parent.mkdir(exist_ok=True )
    if args.reference_path is None and Path(args.score_path ).exists():
        warnings.warn(F'score_path {args.score_path} will be overwritten unless you type ctrl-c.' )
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu" )
    # NOTE(review): `generate_summaries_or_translations` and `datetime_now`
    # were renamed to `lowercase_` in this obfuscated module — confirm the
    # resolved callees before running.
    runtime_metrics = generate_summaries_or_translations(
        examples , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fp16 , task=args.task , prefix=args.prefix , **parsed_args , )
    if args.reference_path is None:
        return {}
    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path ).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(output_lns )]
    scores = score_fn(output_lns , reference_lns )
    scores.update(runtime_metrics)
    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores)
    if args.score_path is not None:
        json.dump(scores , open(args.score_path , "w" ) )
    return scores
if __name__ == "__main__":
    # Usage for MT:
    # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
    # NOTE(review): `run_generate` is not defined under this name in the
    # obfuscated module (the def above is `lowercase_`) — confirm.
    run_generate(verbose=True)
| 713
|
from __future__ import annotations
class _snake_case :
def __init__( self ,UpperCamelCase ) -> None:
snake_case__ :Union[str, Any] = data
snake_case__ :Node | None = None
snake_case__ :Node | None = None
def lowercase_(tree: Node | None) -> None:  # In Order traversal of the tree
    '''Print payloads in-order (left subtree, node, right subtree).'''
    # Fix: the body referenced `tree` and recursed via the original name
    # `display`, neither of which matched the obfuscated definition.
    if tree:
        lowercase_(tree.left )
        print(tree.data )
        lowercase_(tree.right )
def lowercase_(tree: Node | None) -> int:
    '''Return the node count of the longest root-to-leaf path (0 for empty).'''
    # Fix: the body referenced `tree` and recursed via the original name
    # `depth_of_tree`, neither of which matched the obfuscated definition.
    return 1 + max(lowercase_(tree.left ) , lowercase_(tree.right ) ) if tree else 0
def lowercase_(tree: Node) -> bool:
    '''True iff every node has either zero or two children (empty tree is full).'''
    # Fix: the body referenced `tree` and recursed via the original name
    # `is_full_binary_tree`, neither of which matched the obfuscated definition.
    if not tree:
        return True
    if tree.left and tree.right:
        return lowercase_(tree.left ) and lowercase_(tree.right )
    else:
        return not tree.left and not tree.right
def lowercase_() -> None:  # Main function for testing.
    '''Build a fixed 9-node sample tree and exercise the tree utilities.'''
    # Fix: the original assigned nine nodes to the same throwaway local and
    # never linked them, then called the utilities with undefined names.
    # NOTE(review): the node layout below is reconstructed — confirm it
    # matches the intended sample tree.
    tree = _snake_case(1 )
    tree.left = _snake_case(2 )
    tree.right = _snake_case(3 )
    tree.left.left = _snake_case(4 )
    tree.left.right = _snake_case(5 )
    tree.left.right.left = _snake_case(6 )
    tree.right.left = _snake_case(7 )
    tree.right.left.left = _snake_case(8 )
    tree.right.left.left.right = _snake_case(9 )
    # NOTE(review): the three tree utilities in this module were all renamed
    # to `lowercase_`; these are the intended targets — confirm.
    print(is_full_binary_tree(tree ) )
    print(depth_of_tree(tree ) )
    print("Tree is: " )
    display(tree )
if __name__ == "__main__":
    # Script entry point.  NOTE(review): `main` is not defined under this name
    # in the obfuscated module (the def above is `lowercase_`) — confirm.
    main()
| 57
| 0
|
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    # Stand-in used when the vision extra (Pillow) is unavailable so that
    # module import still succeeds; the pipeline tests are skipped anyway.
    class _snake_case :
        @staticmethod
        def lowerCAmelCase_ ( *UpperCamelCase ,**UpperCamelCase ) -> Union[str, Any]:
            # NOTE(review): `*UpperCamelCase, **UpperCamelCase` repeats one
            # parameter name (obfuscation artifact) — Python rejects this.
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _snake_case ( unittest.TestCase ):
    """Pipeline tests for ``object-detection``: output schema checks on tiny
    models plus slow integration tests pinning exact scores/boxes for
    facebook/detr-resnet-50 and a LayoutLMv3 document-detection model.

    NOTE(review): several method signatures below repeat the parameter name
    ``UpperCamelCase`` (an automated-rename artifact); Python rejects
    duplicate argument names, so those defs cannot compile as written.
    """
    _A = MODEL_FOR_OBJECT_DETECTION_MAPPING
    def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> List[Any]:
        # Build a pipeline plus one sample image path for the common test harness.
        # NOTE(review): the pipeline is assigned to a throwaway local while the
        # return statement reads `object_detector` — confirm intended binding.
        snake_case__ :int = ObjectDetectionPipeline(model=UpperCamelCase ,image_processor=UpperCamelCase )
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
    def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> Any:
        # Every detection must expose score/label and an xmin/ymin/xmax/ymax box.
        snake_case__ :int = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png" ,threshold=0.0 )
        self.assertGreater(len(UpperCamelCase ) ,0 )
        for detected_object in outputs:
            self.assertEqual(
                UpperCamelCase ,{
                    "score": ANY(UpperCamelCase ),
                    "label": ANY(UpperCamelCase ),
                    "box": {"xmin": ANY(UpperCamelCase ), "ymin": ANY(UpperCamelCase ), "xmax": ANY(UpperCamelCase ), "ymax": ANY(UpperCamelCase )},
                } ,)
        import datasets
        # Batched inputs covering RGBA/LA/L images plus a URL must all work.
        snake_case__ :Any = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" ,"image" ,split="test" )
        snake_case__ :str = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
            """http://images.cocodataset.org/val2017/000000039769.jpg""",
            # RGBA
            dataset[0]["""file"""],
            # LA
            dataset[1]["""file"""],
            # L
            dataset[2]["""file"""],
        ]
        snake_case__ :Any = object_detector(UpperCamelCase ,threshold=0.0 )
        self.assertEqual(len(UpperCamelCase ) ,len(UpperCamelCase ) )
        for outputs in batch_outputs:
            self.assertGreater(len(UpperCamelCase ) ,0 )
            for detected_object in outputs:
                self.assertEqual(
                    UpperCamelCase ,{
                        "score": ANY(UpperCamelCase ),
                        "label": ANY(UpperCamelCase ),
                        "box": {"xmin": ANY(UpperCamelCase ), "ymin": ANY(UpperCamelCase ), "xmax": ANY(UpperCamelCase ), "ymax": ANY(UpperCamelCase )},
                    } ,)
    @require_tf
    @unittest.skip("Object detection not implemented in TF" )
    def lowerCAmelCase_ ( self ) -> Optional[Any]:
        pass
    @require_torch
    def lowerCAmelCase_ ( self ) -> Optional[Any]:
        # Tiny model: pins exact (deterministic) scores/boxes at threshold 0.
        snake_case__ :Tuple = """hf-internal-testing/tiny-detr-mobilenetsv3"""
        snake_case__ :Union[str, Any] = AutoModelForObjectDetection.from_pretrained(UpperCamelCase )
        snake_case__ :Any = AutoFeatureExtractor.from_pretrained(UpperCamelCase )
        snake_case__ :int = ObjectDetectionPipeline(model=UpperCamelCase ,feature_extractor=UpperCamelCase )
        snake_case__ :int = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" ,threshold=0.0 )
        self.assertEqual(
            nested_simplify(UpperCamelCase ,decimals=4 ) ,[
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
            ] ,)
        snake_case__ :str = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ] ,threshold=0.0 ,)
        self.assertEqual(
            nested_simplify(UpperCamelCase ,decimals=4 ) ,[
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
            ] ,)
    @require_torch
    @slow
    def lowerCAmelCase_ ( self ) -> Union[str, Any]:
        # Full DETR model built explicitly from model + feature extractor.
        snake_case__ :Union[str, Any] = """facebook/detr-resnet-50"""
        snake_case__ :Dict = AutoModelForObjectDetection.from_pretrained(UpperCamelCase )
        snake_case__ :Any = AutoFeatureExtractor.from_pretrained(UpperCamelCase )
        snake_case__ :Tuple = ObjectDetectionPipeline(model=UpperCamelCase ,feature_extractor=UpperCamelCase )
        snake_case__ :Optional[Any] = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
        self.assertEqual(
            nested_simplify(UpperCamelCase ,decimals=4 ) ,[
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ] ,)
        snake_case__ :str = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ] )
        self.assertEqual(
            nested_simplify(UpperCamelCase ,decimals=4 ) ,[
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ] ,)
    @require_torch
    @slow
    def lowerCAmelCase_ ( self ) -> Any:
        # Same DETR model instantiated via the high-level pipeline() factory.
        snake_case__ :Any = """facebook/detr-resnet-50"""
        snake_case__ :int = pipeline("object-detection" ,model=UpperCamelCase )
        snake_case__ :Optional[int] = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
        self.assertEqual(
            nested_simplify(UpperCamelCase ,decimals=4 ) ,[
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ] ,)
        snake_case__ :Any = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ] )
        self.assertEqual(
            nested_simplify(UpperCamelCase ,decimals=4 ) ,[
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ] ,)
    @require_torch
    @slow
    def lowerCAmelCase_ ( self ) -> List[Any]:
        # A high threshold must filter out everything but the two cats.
        snake_case__ :List[Any] = 0.9985
        snake_case__ :Tuple = """facebook/detr-resnet-50"""
        snake_case__ :str = pipeline("object-detection" ,model=UpperCamelCase )
        snake_case__ :Optional[int] = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" ,threshold=UpperCamelCase )
        self.assertEqual(
            nested_simplify(UpperCamelCase ,decimals=4 ) ,[
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ] ,)
    @require_torch
    @require_pytesseract
    @slow
    def lowerCAmelCase_ ( self ) -> List[str]:
        # Document object detection (LayoutLMv3) with a pipeline-level threshold.
        snake_case__ :Union[str, Any] = """Narsil/layoutlmv3-finetuned-funsd"""
        snake_case__ :Union[str, Any] = 0.9993
        snake_case__ :Any = pipeline("object-detection" ,model=UpperCamelCase ,threshold=UpperCamelCase )
        snake_case__ :Optional[int] = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" )
        self.assertEqual(
            nested_simplify(UpperCamelCase ,decimals=4 ) ,[
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
            ] ,)
| 714
|
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
# Validate repository file names: no uppercase, spaces, hyphens, and every
# file must live inside a directory.  Exits with the number of offending
# files (non-zero => CI failure).
# Fix: every module-level name was mangled to `__UpperCAmelCase` while the
# assert and the f-strings read the real names, raising NameError.
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(F'''{len(upper_files)} files contain uppercase characters:''')
    print("\n".join(upper_files) + "\n")
space_files = [file for file in filepaths if " " in file]
if space_files:
    print(F'''{len(space_files)} files contain space characters:''')
    print("\n".join(space_files) + "\n")
hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(F'''{len(hyphen_files)} files contain hyphen characters:''')
    print("\n".join(hyphen_files) + "\n")
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(F'''{len(nodir_files)} files are not in a directory:''')
    print("\n".join(nodir_files) + "\n")
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys
    sys.exit(bad_files)
| 57
| 0
|
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class _snake_case ( lowercase__ ):
    """Levit-specific config test: the config must expose the attributes the
    model code reads (``hidden_sizes`` and ``num_attention_heads``)."""
    def lowerCAmelCase_ ( self ) -> List[str]:
        # Fix: the built config was assigned to a throwaway name while the
        # hasattr checks referenced an undefined `__lowercase`.
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config ,"hidden_sizes" ) )
        self.parent.assertTrue(hasattr(config ,"num_attention_heads" ) )
class _snake_case:
    """Builds tiny Levit configs and random inputs for the model tests.

    NOTE(review): reconstructed from a garbled original whose parameters were
    all renamed to the same identifier (a SyntaxError) while the bodies kept
    the real names; parameter names below are recovered from the body and the
    defaults from the original signature.  Method names are restored to what
    the test class below actually calls (``prepare_config_and_inputs`` etc.).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 6, 8],
        depths=[2, 3, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        # Two "Subsample" stages connecting the three hidden sizes.
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels

    def prepare_config_and_inputs(self):
        """Return ``(config, pixel_values, labels)``; labels is None unless ``use_labels``."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build a small LevitConfig from the tester's settings."""
        return LevitConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding,
            patch_size=self.patch_size,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            depths=self.depths,
            key_dim=self.key_dim,
            drop_path_rate=self.drop_path_rate,
            mlp_ratio=self.mlp_ratio,
            attention_ratio=self.attention_ratio,
            initializer_range=self.initializer_range,
            down_ops=self.down_ops,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Run the base model and check the last hidden state's shape."""
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        # Four conv stages in the patch embedding, each reducing the resolution.
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Run the classification head and check the logits' shape."""
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Return ``(config, inputs_dict)`` as expected by the common tests."""
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class _snake_case(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline test-suite for Levit.

    NOTE(review): reconstructed from a garbled original in which every class
    attribute was named ``_A`` (later assignments clobbered earlier ones),
    every method was named ``lowerCAmelCase_`` (so unittest could discover at
    most one) and argument/receiver names were collapsed to ``__lowercase``.
    Attribute and method names below follow the surviving call sites and the
    standard ModelTesterMixin contract.
    """

    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    # The five flags below were all garbled to ``_A = False`` in the original.
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        # NOTE(review): the tester class defined above is ``_snake_case``;
        # ``LevitModelTester`` is its original (pre-garbling) name.
        self.model_tester = LevitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Levit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Levit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Levit does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            # One hidden state per stage plus the embedding output.
            expected_num_layers = len(self.model_tester.depths) + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height, width = image_size[0], image_size[1]
            for _ in range(4):
                height = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
                width = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [height * width, self.model_tester.hidden_sizes[0]],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):  # name inferred from the skip message -- TODO confirm
        pass

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            # The teacher variant is inference-only and takes no labels.
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f'Testing {model_class} with {problem_type["title"]}'):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]
                    model = model_class(config)
                    model.to(torch_device)
                    model.train()
                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])
                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning form PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f'Something is going wrong in the regression problem: intercepted {w.message}'
                            )

                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def lowercase_() -> "Image.Image":
    """Load the COCO fixture image used by the slow integration test.

    NOTE(review): the original annotated the return type as ``str`` although
    a PIL image is returned; the annotation is corrected here (as a string
    literal so it is never evaluated).
    """
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
@require_torch
@require_vision
class _snake_case(unittest.TestCase):
    """Slow integration test: run the pretrained Levit teacher model on the
    COCO fixture image and compare against reference logits.

    NOTE(review): the garbled original referenced the undefined ``__lowercase``
    where ``torch_device`` / the processed inputs / the expected tensors were
    meant, and called the undefined ``prepare_img`` (this module's garbled
    name for it is ``lowercase_``).
    """

    @cached_property
    def default_image_processor(self):
        # Image processor matching the first pretrained checkpoint.
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        # ``lowercase_`` is this module's (garbled) name for prepare_img().
        image = lowercase_()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 715
|
def lowercase_(inp, table):
    """Permute the bit-string ``inp`` according to ``table`` (1-indexed positions).

    NOTE(review): the garbled original duplicated the parameter name (a
    SyntaxError); the names below are recovered from the function body.
    """
    # str.join over a generator replaces the quadratic ``res += ...`` loop.
    return "".join(inp[i - 1] for i in table)
def lowercase_(data):
    """Rotate the bit-string ``data`` left by one position.

    NOTE(review): the original's parameter was garbled to ``__snake_case``
    while the body read ``data``; the name is restored here.
    """
    return data[1:] + data[0]
def lowercase_(a, b):
    """Bitwise XOR of two equal-length bit-strings.

    NOTE(review): the garbled original duplicated the parameter name (a
    SyntaxError); ``a``/``b`` are recovered from the body.
    """
    return "".join("0" if a[i] == b[i] else "1" for i in range(len(a)))
def lowercase_(s, data):
    """Look up the S-box ``s`` for the 4-bit string ``data``.

    Row is selected by the outer bits, column by the middle bits; the result
    is an unpadded binary string ('0', '1', '10' or '11') which the caller
    pads to two bits.

    NOTE(review): the garbled original duplicated the parameter name (a
    SyntaxError); ``s``/``data`` are recovered from the body.
    """
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]
def lowercase_(expansion, sa, sb, key, message, p4_table=(2, 4, 3, 1)):
    """One S-DES Feistel round: expand/permute the right half, XOR with the
    round key, pass through S-boxes ``sa``/``sb``, permute with P4 and XOR
    into the left half.  Returns the new 8-bit string.

    NOTE(review): the garbled original gave all five parameters the same name
    (a SyntaxError); names are recovered from the body.  ``p4_table`` was a
    module global and is kept backward-compatible as a defaulted parameter.
    The helpers ``apply_table``/``xor``/``apply_sbox`` exist in this module
    only under the garbled name ``lowercase_`` and must be restored for this
    to run.
    """
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(sa, temp[:4])  # noqa: E741
    r = apply_sbox(sb, temp[4:])
    # Pad each S-box output to exactly two bits.
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right
if __name__ == "__main__":
    # NOTE(review): in the garbled original every constant below was bound to
    # ``__UpperCAmelCase`` while the code read the real names (key, paa_table,
    # sa, keya, CT, PT, ...); the assignments are restored to the names used.
    # The helpers apply_table/left_shift/function are still garbled to
    # ``lowercase_`` in their defs above and must be restored module-wide.
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    pa_table = [6, 3, 7, 4, 8, 5, 10, 9]  # P8
    paa_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]  # P10
    pa = [2, 4, 3, 1]  # P4 (used inside the round function)
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    sa = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]  # S0
    sb = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]  # S1

    # key generation
    temp = apply_table(key, paa_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    keya = apply_table(left + right, pa_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    keyb = apply_table(left + right, pa_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, sa, sb, keya, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, sa, sb, keyb, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, sa, sb, keyb, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, sa, sb, keya, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decypting is:", PT)
| 57
| 0
|
import random
from .binary_exp_mod import bin_exp_mod
def lowercase_(n: int, prec: int = 1000) -> bool:
    """Miller-Rabin probabilistic primality test.

    Runs ``prec`` rounds with random bases; a composite survives all rounds
    with probability at most 4**-prec.

    NOTE(review): the garbled original duplicated the parameter name (a
    SyntaxError) and used float division ``d /= 2``; both are fixed here, and
    the project-local ``bin_exp_mod`` helper is replaced by the built-in
    three-argument ``pow`` (modular exponentiation).
    """
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # n is odd: write n - 1 = d * 2**exp with d odd.
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = pow(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                # ``a`` witnesses that n is composite.
                return False
        count += 1
    return True
if __name__ == "__main__":
    # NOTE(review): the garbled original assigned the bound to
    # ``__UpperCAmelCase`` but read the undefined ``n``, and called the
    # undefined ``is_prime_big`` instead of this module's ``lowercase_``.
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if lowercase_(i)))
| 716
|
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _snake_case(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """T5-style encoder over note tokens (spectrogram diffusion).

    NOTE(review): reconstructed — the garbled original subclassed the
    undefined ``_A`` three times (the imports above name the intended mixins),
    duplicated every ``__init__`` parameter name (a SyntaxError), dropped the
    ``self.`` prefix on the attribute assignments that ``forward`` reads, and
    named ``forward`` ``lowerCAmelCase_`` so ``__call__`` could never reach it.
    Parameter names/order are inferred from the usage -- TODO confirm against
    the upstream source.
    """

    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        # Learned-size but frozen positional table.
        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = TaConfig(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = TaBlock(t5config)
            self.encoders.append(lyr)

        self.layer_norm = TaLayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
| 57
| 0
|
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
__UpperCAmelCase : Optional[int] = {
# 1536-bit
5: {
'''prime''': int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=1_6,
),
'''generator''': 2,
},
# 2048-bit
1_4: {
'''prime''': int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=1_6,
),
'''generator''': 2,
},
# 3072-bit
1_5: {
'''prime''': int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=1_6,
),
'''generator''': 2,
},
# 4096-bit
1_6: {
'''prime''': int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=1_6,
),
'''generator''': 2,
},
# 6144-bit
1_7: {
'''prime''': int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=1_6,
),
'''generator''': 2,
},
# 8192-bit
1_8: {
'''prime''': int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=1_6,
),
'''generator''': 2,
},
}
class _snake_case :
def __init__( self ,UpperCamelCase = 14 ) -> None:
if group not in primes:
raise ValueError("Unsupported Group" )
snake_case__ :List[str] = primes[group]["prime"]
snake_case__ :Tuple = primes[group]["generator"]
snake_case__ :Optional[int] = int(hexlify(urandom(32 ) ) ,base=16 )
def lowerCAmelCase_ ( self ) -> str:
return hex(self.__private_key )[2:]
def lowerCAmelCase_ ( self ) -> str:
snake_case__ :Dict = pow(self.generator ,self.__private_key ,self.prime )
return hex(UpperCamelCase )[2:]
def lowerCAmelCase_ ( self ,UpperCamelCase ) -> bool:
# check if the other public key is valid based on NIST SP800-56
return (
2 <= key <= self.prime - 2
and pow(UpperCamelCase ,(self.prime - 1) // 2 ,self.prime ) == 1
)
def lowerCAmelCase_ ( self ,UpperCamelCase ) -> str:
snake_case__ :List[str] = int(UpperCamelCase ,base=16 )
if not self.is_valid_public_key(UpperCamelCase ):
raise ValueError("Invalid public key" )
snake_case__ :Tuple = pow(UpperCamelCase ,self.__private_key ,self.prime )
return shaaaa(str(UpperCamelCase ).encode() ).hexdigest()
@staticmethod
def lowerCAmelCase_ ( UpperCamelCase ,UpperCamelCase ) -> bool:
# check if the other public key is valid based on NIST SP800-56
return (
2 <= remote_public_key_str <= prime - 2
and pow(UpperCamelCase ,(prime - 1) // 2 ,UpperCamelCase ) == 1
)
@staticmethod
def lowerCAmelCase_ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase = 14 ) -> str:
snake_case__ :Union[str, Any] = int(UpperCamelCase ,base=16 )
snake_case__ :Any = int(UpperCamelCase ,base=16 )
snake_case__ :Any = primes[group]["prime"]
if not DiffieHellman.is_valid_public_key_static(UpperCamelCase ,UpperCamelCase ):
raise ValueError("Invalid public key" )
snake_case__ :Union[str, Any] = pow(UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
return shaaaa(str(UpperCamelCase ).encode() ).hexdigest()
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 717
|
# Adjacency list of the demo DAG and the vertex set it covers.
# NOTE(review): the garbled original bound BOTH constants to the same name
# ``__UpperCAmelCase`` while the function read ``edges``/``vertices``.
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start, visited, sort):
    """Depth-first post-order over the module-level graph.

    Appends each vertex to ``sort`` after all of its neighbours are finished
    and restarts from any still-unvisited vertex, so every vertex appears
    exactly once.  Returns the ``sort`` list.

    NOTE(review): the garbled original duplicated the parameter names (a
    SyntaxError) and recursed on the then-undefined name ``topological_sort``;
    the definition is renamed to match its own call sites.
    """
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
| 57
| 0
|
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _snake_case ( UpperCAmelCase__ , unittest.TestCase ):
    """Tokenizer tests for LED (slow and fast variants), driven by a tiny on-disk
    BPE vocab/merges fixture.

    NOTE(review): every method below is named ``lowerCAmelCase_`` — later definitions
    shadow earlier ones, so only the last survives on the class.  Bodies also
    reference ``lowerCamelCase__`` (and locals like ``batch``/``targets``) that are
    never defined: all locals were renamed to ``snake_case__``, each assignment
    clobbering the previous.  The base class ``UpperCAmelCase__`` is undefined as
    well (presumably ``TokenizerTesterMixin``).  Original names must be restored
    before this suite can run.
    """

    # Tokenizer classes under test; the final ``True`` presumably enabled the
    # fast-tokenizer path (``test_rust_tokenizer``) — TODO confirm.
    _A = LEDTokenizer
    _A = LEDTokenizerFast
    _A = True

    def lowerCAmelCase_ ( self ) -> List[str]:
        # setUp: write a minimal BPE vocabulary and merges file to a temp dir.
        super().setUp()
        snake_case__ :Any = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        # token -> id mapping; NOTE(review): ``lowerCamelCase__`` should be the vocab
        # list built just above.
        snake_case__ :Tuple = dict(zip(lowerCamelCase__ ,range(len(lowerCamelCase__ ) ) ) )
        snake_case__ :Tuple = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        snake_case__ :Optional[int] = {"unk_token": "<unk>"}
        # NOTE(review): ``self.vocab_file`` / ``self.merges_file`` are read below but
        # the joins here land in locals — the attribute assignments were lost.
        snake_case__ :Tuple = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
        snake_case__ :str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file ,"w" ,encoding="utf-8" ) as fp:
            fp.write(json.dumps(lowerCamelCase__ ) + "\n" )
        with open(self.merges_file ,"w" ,encoding="utf-8" ) as fp:
            fp.write("\n".join(lowerCamelCase__ ) )

    def lowerCAmelCase_ ( self ,**UpperCamelCase ) -> Any:
        # Slow-tokenizer factory from the on-disk fixture.
        # NOTE(review): body reads ``kwargs``/``lowerCamelCase__`` but the parameter
        # is ``**UpperCamelCase``.
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname ,**lowerCamelCase__ )

    def lowerCAmelCase_ ( self ,**UpperCamelCase ) -> Optional[Any]:
        # Fast-tokenizer factory from the on-disk fixture.
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname ,**lowerCamelCase__ )

    def lowerCAmelCase_ ( self ,UpperCamelCase ) -> List[Any]:
        # Canonical (input, output) text pair for round-trip tests.
        return "lower newer", "lower newer"

    @cached_property
    def lowerCAmelCase_ ( self ) -> str:
        # Pretrained slow tokenizer used by the batch tests below.
        return LEDTokenizer.from_pretrained("allenai/led-base-16384" )

    @cached_property
    def lowerCAmelCase_ ( self ) -> Any:
        # Pretrained fast tokenizer used by the batch tests below.
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" )

    @require_torch
    def lowerCAmelCase_ ( self ) -> str:
        # Batch encoding: expected ids, shapes (2, 9) and first row content.
        snake_case__ :int = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        snake_case__ :Dict = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            snake_case__ :Any = tokenizer(lowerCamelCase__ ,max_length=len(lowerCamelCase__ ) ,padding=lowerCamelCase__ ,return_tensors="pt" )
            self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
            self.assertEqual((2, 9) ,batch.input_ids.shape )
            self.assertEqual((2, 9) ,batch.attention_mask.shape )
            snake_case__ :List[Any] = batch.input_ids.tolist()[0]
            self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )

    @require_torch
    def lowerCAmelCase_ ( self ) -> Union[str, Any]:
        # Encoding without targets must not produce labels/decoder masks.
        snake_case__ :Union[str, Any] = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            snake_case__ :Optional[Any] = tokenizer(lowerCamelCase__ ,padding=lowerCamelCase__ ,return_tensors="pt" )
            self.assertIn("input_ids" ,lowerCamelCase__ )
            self.assertIn("attention_mask" ,lowerCamelCase__ )
            self.assertNotIn("labels" ,lowerCamelCase__ )
            self.assertNotIn("decoder_attention_mask" ,lowerCamelCase__ )

    @require_torch
    def lowerCAmelCase_ ( self ) -> List[Any]:
        # Target texts padded to max_length=32.
        snake_case__ :str = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            snake_case__ :Dict = tokenizer(text_target=lowerCamelCase__ ,max_length=32 ,padding="max_length" ,return_tensors="pt" )
            self.assertEqual(32 ,targets["input_ids"].shape[1] )

    @require_torch
    def lowerCAmelCase_ ( self ) -> Tuple:
        # Truncation of a very long input to the model max length (5122).
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            snake_case__ :Union[str, Any] = tokenizer(
                ["I am a small frog" * 1_024, "I am a small frog"] ,padding=lowerCamelCase__ ,truncation=lowerCamelCase__ ,return_tensors="pt" )
            self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
            self.assertEqual(batch.input_ids.shape ,(2, 5_122) )

    @require_torch
    def lowerCAmelCase_ ( self ) -> List[Any]:
        # Both inputs and targets must start with BOS and end with EOS.
        snake_case__ :Tuple = ["A long paragraph for summarization."]
        snake_case__ :List[str] = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            snake_case__ :List[str] = tokenizer(lowerCamelCase__ ,return_tensors="pt" )
            snake_case__ :Tuple = tokenizer(text_target=lowerCamelCase__ ,return_tensors="pt" )
            snake_case__ :str = inputs["input_ids"]
            snake_case__ :Optional[int] = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )

    @require_torch
    def lowerCAmelCase_ ( self ) -> Tuple:
        # ``pad`` must pad the LED-specific global_attention_mask with -1.
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            snake_case__ :List[Any] = ["Summary of the text.", "Another summary."]
            snake_case__ :Dict = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            snake_case__ :List[str] = tokenizer(lowerCamelCase__ ,padding=lowerCamelCase__ )
            snake_case__ :List[Any] = [[0] * len(lowerCamelCase__ ) for x in encoded_output["input_ids"]]
            snake_case__ :Tuple = tokenizer.pad(lowerCamelCase__ )
            self.assertSequenceEqual(outputs["global_attention_mask"] ,lowerCamelCase__ )

    def lowerCAmelCase_ ( self ) -> Any:
        # Intentionally skipped inherited test.
        pass

    def lowerCAmelCase_ ( self ) -> Union[str, Any]:
        # Slow and fast tokenizers must agree on a sentence containing <mask>.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                snake_case__ :Any = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ ,**lowerCamelCase__ )
                snake_case__ :Tuple = self.tokenizer_class.from_pretrained(lowerCamelCase__ ,**lowerCamelCase__ )
                snake_case__ :Dict = "A, <mask> AllenNLP sentence."
                snake_case__ :List[str] = tokenizer_r.encode_plus(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ,return_token_type_ids=lowerCamelCase__ )
                snake_case__ :List[Any] = tokenizer_p.encode_plus(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ,return_token_type_ids=lowerCamelCase__ )
                self.assertEqual(sum(tokens_r["token_type_ids"] ) ,sum(tokens_p["token_type_ids"] ) )
                self.assertEqual(
                    sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) ,sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) ,)
                snake_case__ :int = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
                snake_case__ :List[str] = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
                self.assertSequenceEqual(tokens_p["input_ids"] ,[0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
                self.assertSequenceEqual(tokens_r["input_ids"] ,[0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
                self.assertSequenceEqual(
                    lowerCamelCase__ ,["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
                self.assertSequenceEqual(
                    lowerCamelCase__ ,["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
| 718
|
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _snake_case ( unittest.TestCase ):
    """Slow integration tests for the Flax Stable Diffusion ControlNet pipeline
    (canny-edge and openpose conditioning).

    NOTE(review): the test bodies pass ``UpperCamelCase`` where literals or locals
    belong (``from_pt=``, ``jit=``, the ``replicate``/``shard``/``pipe`` arguments) —
    that name is undefined in these scopes and is a renaming artifact (presumably
    ``True`` for the flags and the corresponding locals elsewhere).  Both test
    methods share the name ``lowerCAmelCase_``, so the second shadows the first, and
    locals are all assigned to ``snake_case__`` (each assignment clobbers the last).
    """

    def lowerCAmelCase_ ( self ) -> int:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def lowerCAmelCase_ ( self ) -> str:
        # Canny-edge-conditioned generation: checks the output batch shape and a
        # 3x3 slice of the last channel against reference values.
        snake_case__ , snake_case__ :Tuple = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny" ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa )
        snake_case__ , snake_case__ :Any = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5" ,controlnet=UpperCamelCase ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa )
        snake_case__ :List[str] = controlnet_params
        snake_case__ :Union[str, Any] = "bird"
        snake_case__ :Optional[int] = jax.device_count()
        snake_case__ :Tuple = pipe.prepare_text_inputs([prompts] * num_samples )
        snake_case__ :Union[str, Any] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" )
        snake_case__ :str = pipe.prepare_image_inputs([canny_image] * num_samples )
        snake_case__ :List[str] = jax.random.PRNGKey(0 )
        snake_case__ :str = jax.random.split(UpperCamelCase ,jax.device_count() )
        snake_case__ :int = replicate(UpperCamelCase )
        snake_case__ :Any = shard(UpperCamelCase )
        snake_case__ :Any = shard(UpperCamelCase )
        snake_case__ :str = pipe(
            prompt_ids=UpperCamelCase ,image=UpperCamelCase ,params=UpperCamelCase ,prng_seed=UpperCamelCase ,num_inference_steps=50 ,jit=UpperCamelCase ,).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        snake_case__ :List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        snake_case__ :Any = images[0, 253:256, 253:256, -1]
        snake_case__ :Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        snake_case__ :List[Any] = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078] )
        print(f'output_slice: {output_slice}' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2

    def lowerCAmelCase_ ( self ) -> Optional[int]:
        # Openpose-conditioned generation: same structure with a pose image.
        snake_case__ , snake_case__ :List[str] = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose" ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa )
        snake_case__ , snake_case__ :Optional[Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5" ,controlnet=UpperCamelCase ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa )
        snake_case__ :str = controlnet_params
        snake_case__ :int = "Chef in the kitchen"
        snake_case__ :List[Any] = jax.device_count()
        snake_case__ :Dict = pipe.prepare_text_inputs([prompts] * num_samples )
        snake_case__ :Any = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" )
        snake_case__ :Optional[int] = pipe.prepare_image_inputs([pose_image] * num_samples )
        snake_case__ :List[str] = jax.random.PRNGKey(0 )
        snake_case__ :Any = jax.random.split(UpperCamelCase ,jax.device_count() )
        snake_case__ :Dict = replicate(UpperCamelCase )
        snake_case__ :Tuple = shard(UpperCamelCase )
        snake_case__ :Optional[int] = shard(UpperCamelCase )
        snake_case__ :Optional[Any] = pipe(
            prompt_ids=UpperCamelCase ,image=UpperCamelCase ,params=UpperCamelCase ,prng_seed=UpperCamelCase ,num_inference_steps=50 ,jit=UpperCamelCase ,).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        snake_case__ :int = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        snake_case__ :List[str] = images[0, 253:256, 253:256, -1]
        snake_case__ :Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        snake_case__ :List[str] = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]] )
        print(f'output_slice: {output_slice}' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 57
| 0
|
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
# Emit everything down to DEBUG; the TPU tests below rely on captured log output.
logging.basicConfig(level=logging.DEBUG)
# Root logger handle (named ``__UpperCAmelCase`` by the obfuscation; read below as ``logger``).
__UpperCAmelCase : Tuple = logging.getLogger()
def get_results(path: str) -> dict:
    """Load ``all_results.json`` from *path* and return its contents.

    Restored names: the obfuscated definition was ``lowercase_(__snake_case)`` while
    its body referenced ``path``/``results`` and the call site below uses
    ``get_results`` — both pin the original identifiers.

    Args:
        path: directory expected to contain ``all_results.json``.

    Returns:
        The parsed JSON object (a dict of metric name -> value).

    Raises:
        ValueError: if ``all_results.json`` does not exist under *path*.
    """
    results = {}
    # Reuse ``path`` for the full file path so the error message names the file.
    path = os.path.join(path, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        # Fail loudly rather than letting callers assert against an empty dict.
        raise ValueError(f"can't find {path}")
    return results
# Mirror log output to stdout so the test runner captures it.
# NOTE(review): the handler was assigned to ``__UpperCAmelCase`` but is added as
# ``stream_handler`` — the original assignment target was ``stream_handler``.
__UpperCAmelCase : str = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class _snake_case ( __a ):
    """TPU smoke tests that launch example scripts through ``xla_spawn`` with a
    mocked ``sys.argv``.

    NOTE(review): the base class ``__a`` and the two ``lowerCAmelCase_`` arguments
    to ``patch.object`` are undefined names (renaming artifacts).  The patch
    originally targeted ``sys``'s ``argv`` with the args list built just above it,
    and the base was presumably ``TestCasePlus`` — TODO confirm.  The f-string also
    interpolates ``tmp_dir``, which is assigned to ``snake_case__`` instead.
    """

    def lowerCAmelCase_ ( self ) -> List[Any]:
        # Run run_glue.py on 8 TPU cores; check eval accuracy and wall-clock time.
        import xla_spawn
        snake_case__ :List[str] = self.get_auto_remove_tmp_dir()
        snake_case__ :Optional[Any] = f'\n ./examples/pytorch/text-classification/run_glue.py\n --num_cores=8\n ./examples/pytorch/text-classification/run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --do_train\n --do_eval\n --debug tpu_metrics_debug\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --max_steps=10\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n '.split()
        with patch.object(lowerCAmelCase_ ,"argv" ,lowerCAmelCase_ ):
            snake_case__ :List[Any] = time()
            xla_spawn.main()
            snake_case__ :Tuple = time()
            snake_case__ :Union[str, Any] = get_results(lowerCAmelCase_ )
            self.assertGreaterEqual(result["eval_accuracy"] ,0.75 )
            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start ,500 )

    def lowerCAmelCase_ ( self ) -> Tuple:
        # Minimal spawn test: just make sure xla_spawn runs the trainer test file.
        import xla_spawn
        snake_case__ :str = "\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n ".split()
        with patch.object(lowerCAmelCase_ ,"argv" ,lowerCAmelCase_ ):
            xla_spawn.main()
| 719
|
def bead_sort(sequence: list) -> list:
    """Sort a list of non-negative integers in place using bead ("gravity") sort.

    Restored names: the obfuscated definition was ``lowercase_(__snake_case)`` while
    the body and the module-level asserts below reference ``sequence`` and
    ``bead_sort``.  The element check was also broken
    (``isinstance(__snake_case, __snake_case)``); it validates each element now.

    Args:
        sequence: list of non-negative integers (mutated in place).

    Returns:
        The same list, sorted ascending.

    Raises:
        TypeError: if any element is not a non-negative integer.
    """
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    # One pass per element guarantees every "bead" has fallen to rest.
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                # Move the excess beads from the upper rod down to the lower rod.
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
    # Smoke checks; ``bead_sort`` is the name the sort function above should carry
    # (the obfuscated def is ``lowercase_`` — NOTE(review): undefined as written).
    assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 57
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__UpperCAmelCase : int = logging.get_logger(__name__)
class _snake_case ( __lowercase ):
    """ConvNeXt-style image processor: resize (with ``crop_pct`` center-crop logic
    below 384 px), rescale to [0, 1], and normalize with ImageNet statistics.

    Fixes vs. the obfuscated source: every parameter was named ``UpperCamelCase``
    (duplicate argument names — a SyntaxError); the real names are pinned by the
    body references (``do_resize``, ``size``, ``crop_pct``, ...).  Method names are
    restored to ``resize``/``rescale``/``normalize``/``preprocess`` because
    ``preprocess`` calls them via ``self.``.  The validation
    ``do_resize and size is None or resample is None`` is parenthesized correctly.
    """

    _A = ['''pixel_values''']

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        crop_pct: Optional[float] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize per the ConvNeXt recipe.

        Below 384 px: resize the shortest edge to ``shortest_edge / crop_pct``
        preserving aspect ratio, then center-crop to a square.  At 384 px or more:
        warp directly to ``(shortest_edge, shortest_edge)`` with no crop.
        """
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]
        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            # ``resize`` here is the module-level transform, not this method.
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> Union[str, Any]:
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize with per-channel ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        crop_pct: Optional[float] = None,
        resample: Optional[PILImageResampling] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Preprocess one image or a batch, falling back to instance defaults for
        any argument left as ``None``; returns a ``BatchFeature``."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        # Parentheses matter: without them ``and`` binds tighter than ``or`` and this
        # would raise whenever resample is None, even with do_resize False.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 720
|
from __future__ import annotations
def lowercase_(__snake_case: list) -> float:
    """Return the arithmetic mean of a non-empty list of numbers.

    Fix: the body referenced an undefined name (``nums``) for the emptiness check;
    it now uses the actual parameter.

    >>> lowercase_([1, 2, 3])
    2.0

    Raises:
        ValueError: if the list is empty.
    """
    if not __snake_case:
        raise ValueError("List is empty")
    return sum(__snake_case) / len(__snake_case)
if __name__ == "__main__":
    # Run any doctest examples defined in this module.
    import doctest
    doctest.testmod()
| 57
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
__UpperCAmelCase : List[Any] = None
# Module logger.
__UpperCAmelCase : Any = logging.get_logger(__name__)
# SentencePiece sub-word prefix marker.
__UpperCAmelCase : int = "▁"
# NOTE(review): every constant below was renamed to ``__UpperCAmelCase`` so each
# assignment clobbers the previous one; the class further down still reads the
# original names (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES), which must be restored.
__UpperCAmelCase : Any = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
__UpperCAmelCase : List[Any] = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}
# Model max input length per checkpoint.
__UpperCAmelCase : Optional[int] = {
    "google/pegasus-xsum": 5_1_2,
}
class _snake_case ( _A ):
    """Fast (tokenizers-backed) Pegasus tokenizer.

    NOTE(review): this class is heavily damaged by mechanical renaming — the class
    attributes are all assigned to ``_A`` (only the last survives), ``__init__``
    declares every parameter as ``UpperCamelCase`` (duplicate argument names, a
    SyntaxError), and bodies reference undefined names such as ``UpperCAmelCase__``,
    ``offset``, ``mask_token_sent``, ``seq``, ``token_ids_a`` and
    ``save_directory``.  The original signature was presumably
    ``(vocab_file=None, tokenizer_file=None, pad_token="<pad>", eos_token="</s>",
    unk_token="<unk>", mask_token="<mask_2>", mask_token_sent="<mask_1>",
    additional_special_tokens=None, offset=103)`` — TODO confirm before restoring.
    """

    _A = VOCAB_FILES_NAMES
    _A = PRETRAINED_VOCAB_FILES_MAP
    _A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _A = PegasusTokenizer
    _A = ['input_ids', 'attention_mask']

    def __init__( self ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase="<pad>" ,UpperCamelCase="</s>" ,UpperCamelCase="<unk>" ,UpperCamelCase="<mask_2>" ,UpperCamelCase="<mask_1>" ,UpperCamelCase=None ,UpperCamelCase=103 ,**UpperCamelCase ,) -> Optional[int]:
        # Validate/extend additional_special_tokens so ids 0..offset-1 are covered
        # by <mask_1> plus generated <unk_i> placeholders.
        snake_case__ :List[str] = offset
        if additional_special_tokens is not None:
            if not isinstance(UpperCAmelCase__ ,UpperCAmelCase__ ):
                raise TypeError(
                    f'additional_special_tokens should be of type {type(UpperCAmelCase__ )}, but is'
                    f' {type(UpperCAmelCase__ )}' )
            snake_case__ :Any = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f'<unk_{i}>' for i in range(len(UpperCAmelCase__ ) ,self.offset - 1 )
            ]
            if len(set(UpperCAmelCase__ ) ) != len(UpperCAmelCase__ ):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.' )
            snake_case__ :Any = additional_special_tokens_extended
        else:
            snake_case__ :int = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f'<unk_{i}>' for i in range(2 ,self.offset )]
        super().__init__(
            UpperCAmelCase__ ,tokenizer_file=UpperCAmelCase__ ,pad_token=UpperCAmelCase__ ,eos_token=UpperCAmelCase__ ,unk_token=UpperCAmelCase__ ,mask_token=UpperCAmelCase__ ,mask_token_sent=UpperCAmelCase__ ,offset=UpperCAmelCase__ ,additional_special_tokens=UpperCAmelCase__ ,**UpperCAmelCase__ ,)
        snake_case__ :Tuple = vocab_file
        snake_case__ :Any = False if not self.vocab_file else True

    def lowerCAmelCase_ ( self ,UpperCamelCase ) -> Optional[Any]:
        # _special_token_mask: 0/1 mask over ``seq`` (1 = special token).
        snake_case__ :List[Any] = set(self.all_special_ids )  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id )  # <unk> is only sometimes special
        if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}' )
        return [1 if x in all_special_ids else 0 for x in seq]

    def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ,UpperCamelCase = False ) -> List[int]:
        # get_special_tokens_mask: delegate to _special_token_mask, the trailing
        # [1] accounts for the appended EOS.
        if already_has_special_tokens:
            return self._special_token_mask(UpperCAmelCase__ )
        elif token_ids_a is None:
            return self._special_token_mask(UpperCAmelCase__ ) + [1]
        else:
            return self._special_token_mask(token_ids_a + token_ids_a ) + [1]

    def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase=None ) -> List[int]:
        # build_inputs_with_special_tokens: Pegasus only appends EOS (no BOS).
        if token_ids_a is None:
            return token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_a + token_ids_a + [self.eos_token_id]

    def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ) -> Tuple[str]:
        # save_vocabulary: copy the sentencepiece model next to the saved tokenizer.
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(UpperCAmelCase__ ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        snake_case__ :Optional[int] = os.path.join(
            UpperCAmelCase__ ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ):
            copyfile(self.vocab_file ,UpperCAmelCase__ )
        return (out_vocab_file,)
| 721
|
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal score for a full binary game tree via plain minimax.

    Restored names: the obfuscated def declared five parameters all named
    ``__snake_case`` (a SyntaxError) while the body references ``depth``,
    ``node_index``, ``is_max``, ``scores``, ``height`` and recurses via
    ``minimax`` — those references pin the original identifiers.  The recursive
    calls alternate the player (maximizer -> minimizer and back), which the
    placeholder arguments had erased.

    Args:
        depth: current depth in the tree (root is 0).
        node_index: index of the current node within its level.
        is_max: True when the current player is the maximizer.
        scores: leaf scores, left to right; length must be a power of two.
        height: depth at which the leaves live (log2 of ``len(scores)``).

    Raises:
        ValueError: if ``depth`` is negative or ``scores`` is empty.
    """
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        # Leaf level: the score is fixed.
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )
def lowercase_ ( ) -> None:
    '''Demo entry point: print the minimax-optimal value for a sample score list.

    NOTE(review): locals are all assigned to ``snake_case__`` (each clobbering the
    last) and the call passes ``__snake_case`` three times, which is undefined
    here; originally this read ``print(minimax(0, 0, True, scores, height))`` with
    ``scores``/``height`` being the two locals above — TODO confirm the boolean.
    '''
    snake_case__ :List[Any] = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23]
    snake_case__ :int = math.log(len(__snake_case ) , 2 )
    print("Optimal value : " , end="" )
    print(minimax(0 , 0 , __snake_case , __snake_case , __snake_case ) )
if __name__ == "__main__":
    # Run doctests, then the demo.
    # NOTE(review): ``main`` is undefined — the demo function above was renamed to
    # ``lowercase_`` by the obfuscation.
    import doctest
    doctest.testmod()
    main()
| 57
| 0
|
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
__UpperCAmelCase : Dict = datasets.utils.logging.get_logger(__name__)
@dataclass
class _snake_case ( datasets.BuilderConfig ):
    """BuilderConfig for the Parquet loader.

    Field names restored from their call sites in the builder below
    (``self.config.batch_size`` / ``.columns`` / ``.features``); the obfuscated
    source assigned all three to ``_A``, so only the last assignment survived.

    Attributes:
        batch_size: rows per Arrow record batch when streaming a Parquet file.
        columns: optional subset of columns to load (must match the features).
        features: optional explicit dataset features (schema).
    """

    batch_size: int = 10000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None
class _snake_case ( datasets.ArrowBasedBuilder ):
    """Arrow-based builder that streams Parquet files into dataset tables.

    NOTE(review): method bodies use ``A__`` as a stand-in for whichever local each
    call actually operates on (``data_files``, ``files``, ``file``, ``f``,
    ``pa_table``, ...) — undefined as written.  ``ParquetConfig`` is also undefined
    here: the config dataclass above was renamed to ``_snake_case``.
    """

    _A = ParquetConfig

    def lowerCAmelCase_ ( self ) -> Tuple:
        # Dataset metadata comes entirely from the (optional) configured features.
        return datasets.DatasetInfo(features=self.config.features )

    def lowerCAmelCase_ ( self ,UpperCamelCase ) -> str:
        # Resolve config.data_files into one SplitGenerator per split, inferring
        # features from the first file's Arrow schema when none were configured.
        if not self.config.data_files:
            raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' )
        snake_case__ :Dict = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(A__ ,(str, list, tuple) ):
            snake_case__ :List[Any] = data_files
            if isinstance(A__ ,A__ ):
                snake_case__ :Any = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            snake_case__ :Optional[int] = [dl_manager.iter_files(A__ ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN ,gen_kwargs={"files": files} )]
        snake_case__ :Any = []
        for split_name, files in data_files.items():
            if isinstance(A__ ,A__ ):
                snake_case__ :List[Any] = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            snake_case__ :Union[str, Any] = [dl_manager.iter_files(A__ ) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(A__ ):
                    with open(A__ ,"rb" ) as f:
                        snake_case__ :Optional[Any] = datasets.Features.from_arrow_schema(pq.read_schema(A__ ) )
                    break
            splits.append(datasets.SplitGenerator(name=A__ ,gen_kwargs={"files": files} ) )
        return splits

    def lowerCAmelCase_ ( self ,UpperCamelCase ) -> pa.Table:
        # _cast_table: cast a raw Arrow table to the configured features' schema.
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            snake_case__ :int = table_cast(A__ ,self.info.features.arrow_schema )
        return pa_table

    def lowerCAmelCase_ ( self ,UpperCamelCase ) -> Optional[int]:
        # _generate_tables: stream record batches from each Parquet file, yielding
        # ("<file>_<batch>", table) pairs after validating any column selection.
        snake_case__ :Optional[int] = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema ) != sorted(self.config.columns ):
                raise ValueError(
                    f'Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'' )
        for file_idx, file in enumerate(itertools.chain.from_iterable(A__ ) ):
            with open(A__ ,"rb" ) as f:
                snake_case__ :List[str] = pq.ParquetFile(A__ )
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size ,columns=self.config.columns ) ):
                        snake_case__ :Optional[Any] = pa.Table.from_batches([record_batch] )
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f'{file_idx}_{batch_idx}', self._cast_table(A__ )
                except ValueError as e:
                    logger.error(f'Failed to read file \'{file}\' with error {type(A__ )}: {e}' )
                    raise
| 700
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
def squared_euclidean_distance(a, b):
    """Pairwise squared Euclidean distances between rows of ``a`` and rows of ``b``.

    Restored names: the obfuscated def declared both parameters as ``__snake_case``
    (a SyntaxError) while the body references ``b`` and the call site below uses
    ``squared_euclidean_distance``.

    Args:
        a: array of shape (n, d).
        b: array of shape (m, d).

    Returns:
        Array of shape (n, m) where entry (i, j) is ||a[i] - b[j]||^2.
    """
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    # ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2, broadcast over the (n, m) grid.
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d
def color_quantize(x, clusters):
    """Map every pixel of ``x`` to the index of its nearest cluster color.

    Restored names: the obfuscated def declared both parameters as ``__snake_case``
    (a SyntaxError) while the body references ``x`` and the call site in the image
    processor below uses ``color_quantize``.

    Args:
        x: image array of shape (..., 3).
        clusters: palette array of shape (k, 3).

    Returns:
        Flat array of cluster indices, one per pixel.
    """
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
class _snake_case ( _A ):
    """ImageGPT-style image processor: resize, normalize pixel values to [-1, 1],
    and optionally color-quantize each pixel against a fixed cluster palette into
    token ids (``input_ids``).

    Fixes vs. the obfuscated source: every parameter was named ``UpperCamelCase``
    (duplicate argument names — a SyntaxError); the real names are pinned by the
    body references.  Method names are restored to ``resize``/``normalize``/
    ``preprocess`` because ``preprocess`` calls them via ``self.``.  The validation
    ``do_resize and size is None or resample is None`` is parenthesized correctly.
    """

    _A = ['pixel_values']

    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        # Keep the palette as a numpy array so the distance math is vectorized.
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize ``image`` to exactly ``size['height'] x size['width']``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        # ``resize`` here is the module-level transform, not this method.
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        """Scale pixel values from [0, 255] to [-1, 1]."""
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_normalize: Optional[bool] = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Preprocess one image or a batch, falling back to instance defaults for
        any argument left as ``None``.

        When ``do_color_quantize`` is on, the output is flattened per-pixel cluster
        ids under the ``input_ids`` key; otherwise channel-formatted pixel arrays.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        # Parentheses matter: without them ``and`` binds tighter than ``or`` and this
        # would raise whenever resample is None, even with do_resize False.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_normalize:
            images = [self.normalize(image=image) for image in images]
        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])
            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)
            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 57
| 0
|
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase : List[Any] = logging.get_logger(__name__)
__UpperCAmelCase : Dict = {
"snap-research/efficientformer-l1-300": (
"https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
),
}
class _snake_case ( PretrainedConfig ):
    """Configuration class for an EfficientFormer model.

    All defaults correspond to the EfficientFormer-L1 architecture
    (snap-research/efficientformer-l1-300).

    NOTE(review): the original signature repeated a single placeholder name
    for every parameter (a SyntaxError) and inherited from the undefined
    name `_A`. Parameter names below are reconstructed from the attribute
    assignments in the body; the base class comes from this module's imports.
    """

    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_metaad_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1E-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1E-12,
        image_size: int = 224,
        batch_norm_eps: float = 1E-05,
        **kwargs,
    ) -> None:
        # NOTE: the list defaults are shared objects; they are only read here,
        # never mutated, matching the upstream signature.
        super().__init__(**kwargs)
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        # "metaad" follows this file's naming; corresponds to num_meta3d_blocks upstream.
        self.num_metaad_blocks = num_metaad_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
| 701
|
import pytest
__UpperCAmelCase : int = "__dummy_dataset1__"
__UpperCAmelCase : int = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def lowercase_ ( ) -> str:
    '''Fixture: the module name of the dummy dataset loading script.

    NOTE(review): ``DATASET_LOADING_SCRIPT_NAME`` is expected to be the
    module-level constant defined just above; in this file that constant was
    bound to the placeholder name ``__UpperCAmelCase`` — confirm the binding
    before relying on this fixture.
    '''
    return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def lowercase_ ( ) -> str:
    '''Fixture: the full source code of the dummy dataset loading script.

    NOTE(review): ``DATASET_LOADING_SCRIPT_CODE`` is expected to be the
    module-level constant defined above; in this file that constant was
    bound to the placeholder name ``__UpperCAmelCase`` — confirm the binding
    before relying on this fixture.
    '''
    return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def lowercase_ ( dataset_loading_script_name , dataset_loading_script_code , tmp_path ) -> str:
    """Write the dummy dataset loading script to a temp directory.

    Creates ``<tmp_path>/datasets/<script_name>/<script_name>.py`` containing
    the script source, and returns the file path as a string.

    NOTE(review): the original signature repeated one placeholder parameter
    name three times (a SyntaxError); pytest injects fixtures by parameter
    name, so the names below must match the sibling fixture names and the
    built-in ``tmp_path``.
    """
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    # parents=True: the intermediate "datasets" directory does not exist yet.
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_path)
| 57
| 0
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def lowercase_ ( ):
    """Parse command-line arguments for the TPU launch helper.

    Returns an ``argparse.Namespace`` with ``num_cores``, ``training_script``
    and ``training_script_args``.
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        ) )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores" , type=int , default=1 , help="Number of TPU cores to use (1 or 8)." )
    # positional
    parser.add_argument(
        "training_script" , type=str , help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ) , )
    # rest from the training program: REMAINDER captures everything after the
    # script path verbatim so it can be forwarded to the training script.
    parser.add_argument("training_script_args" , nargs=REMAINDER )
    return parser.parse_args()
def lowercase_ ( ):
    """Launch helper entry point.

    Parses the CLI arguments, imports the user's training script as a module,
    patches ``sys.argv`` for it, and spawns its ``_mp_fn`` on
    ``args.num_cores`` TPU processes via ``xmp.spawn``.
    """
    # NOTE(review): `parse_args` is expected to be the argument-parsing helper
    # defined above; in this file that helper was renamed to a placeholder —
    # confirm the name resolves.
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv so the spawned module sees only the arguments meant for
    # it, plus the core count flag expected by transformers training scripts.
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    lowercase_()
| 702
|
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 57
| 0
|
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def lowercase_ ( cp ):
    """Return True if the Unicode code point ``cp`` is a CJK character.

    Covers the CJK Unified Ideographs blocks and their extensions A-F plus
    the two Compatibility Ideographs blocks.
    """
    # NOTE(review): the original parameter was an unused placeholder while the
    # body read an undefined `cp` — the parameter is now the code point itself.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  # Extension A
        or (cp >= 0x20000 and cp <= 0x2A6DF)  # Extension B
        or (cp >= 0x2A700 and cp <= 0x2B73F)  # Extension C
        or (cp >= 0x2B740 and cp <= 0x2B81F)  # Extension D
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  # Extension E
        or (cp >= 0xF900 and cp <= 0xFAFF)  # Compatibility Ideographs
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  # Compatibility Supplement
    ):
        return True
    return False
def lowercase_ ( word ):
    """Return 1 if every character in ``word`` is a CJK character, else 0.

    An empty string vacuously returns 1.
    """
    # NOTE(review): `_is_chinese_char` must resolve to the single-code-point
    # predicate defined above in this file (renamed by obfuscation).
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def lowercase_ ( tokens ):
    """Collect the multi-character Chinese words appearing in ``tokens``.

    Returns the distinct tokens of length > 1 whose characters are all CJK,
    as a list (order unspecified, coming from a set).
    """
    word_set = set()
    for token in tokens:
        # Short-circuit: `is_chinese` is only consulted for multi-char tokens.
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    return list(word_set)
def lowercase_ ( bert_tokens , chinese_word_set ):
    """Mark BERT sub-tokens that continue a whole Chinese word with '##'.

    Scans ``bert_tokens`` left to right; whenever a run of tokens starting at
    the cursor joins into a word from ``chinese_word_set`` (longest match
    first), every token after the first in that run is prefixed with '##'.
    Mutates and returns the token list.
    """
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            # Try the longest candidate word first, down to length 2.
            window = min(end - start, max_word_len)
            for i in range(window, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    # Mark continuation pieces in place (the original wrote to
                    # a throwaway local instead of the list slot).
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def lowercase_ ( lines , ltp_tokenizer , bert_tokenizer ):
    """Compute whole-word-masking reference positions for Chinese text.

    For each input line, segments it into words with LTP, tokenizes it with a
    BERT tokenizer, and returns, per line, the indices of sub-tokens that
    continue a whole Chinese word (the '##' pieces).

    NOTE(review): undefined placeholder names from the obfuscated original
    were restored from context; confirm against the upstream script.
    """
    ltp_res = []
    # LTP segmentation in batches of 100 lines.
    for i in range(0, len(lines), 1_00):
        res = ltp_tokenizer.pipeline(lines[i : i + 1_00], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    # BERT tokenization in batches of 100 lines.
    for i in range(0, len(lines), 1_00):
        res = bert_tokenizer(lines[i : i + 1_00], add_special_tokens=True, truncation=True, max_length=5_12)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def lowercase_ ( args ):
    """Entry point: read the corpus, compute WWM reference ids, save as JSONL.

    Reads ``args.file_name``, drops empty/whitespace lines, builds the LTP and
    BERT tokenizers from ``args.ltp`` / ``args.bert``, and writes one JSON list
    per line to ``args.save_path``.
    """
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    # avoid delimiter like '\u2029'
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        # NOTE(review): `prepare_ref` is the batch helper defined above in
        # this file (renamed by obfuscation) — confirm the name resolves.
        lines_out = [json.dumps(ref) + '''\n''' for ref in ref_ids]
        f.writelines(lines_out)
if __name__ == "__main__":
    # NOTE(review): the original bound the parser to a placeholder name while
    # the following lines read `parser`, and then called an undefined `main`.
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        required=False,
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp",
        required=False,
        type=str,
        default="./resources/ltp",
        help="resources for LTP tokenizer, usually a path",
    )
    parser.add_argument(
        "--bert",
        required=False,
        type=str,
        default="./resources/robert",
        help="resources for Bert tokenizer",
    )
    parser.add_argument(
        "--save_path",
        required=False,
        type=str,
        default="./resources/ref.txt",
        help="path to save res",
    )
    args = parser.parse_args()
    # The entry-point function in this file is the last definition of
    # `lowercase_` above (the renamed `main`).
    lowercase_(args)
| 703
|
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
__UpperCAmelCase : Dict = True
except ImportError:
__UpperCAmelCase : List[Any] = False
__UpperCAmelCase : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
def lowercase_ ( args : Namespace ):
    """Factory for the `transformers-cli add-new-model` command.

    Builds the command object (the class defined below, obfuscated here as
    ``_snake_case``) from parsed CLI arguments.
    """
    # NOTE(review): the original referenced an undefined `AddNewModelCommand`
    # and an unused placeholder parameter; both restored from context.
    return _snake_case(args.testing, args.testing_file, path=args.path)
class _snake_case ( _A ):
    """`transformers-cli add-new-model`: scaffold a new model from the bundled
    cookiecutter template, then move the generated files into the repository
    layout (``src/transformers/models/<name>``, tests, docs).

    NOTE(review): this block appears machine-obfuscated — many distinct local
    names were collapsed into the placeholders ``snake_case__``,
    ``UpperCamelCase`` and ``lowerCAmelCase_``, leaving several references
    undefined (flagged inline) and one signature a SyntaxError. The base
    class is presumably ``BaseTransformersCLICommand`` (imported above).
    """
    @staticmethod
    def lowerCAmelCase_ ( UpperCamelCase ) -> Any:
        # Register the `add-new-model` sub-command and its CLI flags.
        # NOTE(review): `parser` is undefined here — presumably the
        # `UpperCamelCase` parameter; `type=UpperCamelCase` below looks like
        # `type=str`, and `func=UpperCamelCase` should be the command factory.
        snake_case__ :Dict = parser.add_parser("add-new-model" )
        add_new_model_parser.add_argument("--testing" ,action="store_true" ,help="If in testing mode." )
        add_new_model_parser.add_argument("--testing_file" ,type=UpperCamelCase ,help="Configuration file on which to run." )
        add_new_model_parser.add_argument(
            "--path" ,type=UpperCamelCase ,help="Path to cookiecutter. Should only be used for testing purposes." )
        add_new_model_parser.set_defaults(func=UpperCamelCase )
    def __init__( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=None ,*UpperCamelCase ) -> Any:
        # NOTE(review): duplicate parameter names make this signature a
        # SyntaxError; the body expects `testing`, `testing_file` and `path`,
        # stored as self._testing / self._testing_file / self._path.
        snake_case__ :Union[str, Any] = testing
        snake_case__ :Union[str, Any] = testing_file
        snake_case__ :List[str] = path
    def lowerCAmelCase_ ( self ) -> List[Any]:
        # Run the command: execute cookiecutter, read back the generated
        # configuration, and move each generated file into the repo tree.
        warnings.warn(
            "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
            "It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
            "checks, you should use `transformers-cli add-new-model-like` instead." )
        if not _has_cookiecutter:
            raise ImportError(
                "Model creation dependencies are required to use the `add_new_model` command. Install them by running "
                "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n" )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        snake_case__ :Tuple = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(UpperCamelCase ) > 0:
            raise ValueError(
                "Several directories starting with `cookiecutter-template-` in current working directory. "
                "Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
                "change your working directory." )
        # NOTE(review): `Path(UpperCamelCase)` below is presumably
        # `Path(__file__)` (walk up to the transformers repo root).
        snake_case__ :str = (
            Path(UpperCamelCase ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
        )
        snake_case__ :Tuple = path_to_transformer_root / "templates" / "adding_a_new_model"
        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(UpperCamelCase ) )
        else:
            with open(self._testing_file ,"r" ) as configuration_file:
                # NOTE(review): presumably `json.load(configuration_file)`.
                snake_case__ :str = json.load(UpperCamelCase )
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path ) ,no_input=UpperCamelCase ,extra_context=UpperCamelCase ,)
        # The freshly generated folder is the only `cookiecutter-template-*` dir.
        snake_case__ :List[Any] = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]
        # Retrieve configuration
        with open(directory + "/configuration.json" ,"r" ) as configuration_file:
            snake_case__ :Dict = json.load(UpperCamelCase )
        snake_case__ :Optional[Any] = configuration["lowercase_modelname"]
        snake_case__ :List[Any] = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f'{directory}/configuration.json' )
        # Which frameworks were requested in the cookiecutter answers.
        snake_case__ :Any = "PyTorch" in generate_tensorflow_pytorch_and_flax
        snake_case__ :Any = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        snake_case__ :Any = "Flax" in generate_tensorflow_pytorch_and_flax
        snake_case__ :Dict = f'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'
        # NOTE(review): `exist_ok=UpperCamelCase` is presumably `exist_ok=True`.
        os.makedirs(UpperCamelCase ,exist_ok=UpperCamelCase )
        os.makedirs(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}' ,exist_ok=UpperCamelCase )
        # Tests require submodules as they have parent imports
        with open(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' ,"w" ):
            pass
        shutil.move(
            f'{directory}/__init__.py' ,f'{model_dir}/__init__.py' ,)
        shutil.move(
            f'{directory}/configuration_{lowercase_model_name}.py' ,f'{model_dir}/configuration_{lowercase_model_name}.py' ,)
        def remove_copy_lines(UpperCamelCase ):
            # Strip `# Copied from transformers.` markers from a generated file
            # by rewriting it in place without those lines.
            with open(UpperCamelCase ,"r" ) as f:
                snake_case__ :List[str] = f.readlines()
            with open(UpperCamelCase ,"w" ) as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(UpperCamelCase )
        if output_pytorch:
            if not self._testing:
                remove_copy_lines(f'{directory}/modeling_{lowercase_model_name}.py' )
            shutil.move(
                f'{directory}/modeling_{lowercase_model_name}.py' ,f'{model_dir}/modeling_{lowercase_model_name}.py' ,)
            shutil.move(
                f'{directory}/test_modeling_{lowercase_model_name}.py' ,f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' ,)
        else:
            os.remove(f'{directory}/modeling_{lowercase_model_name}.py' )
            os.remove(f'{directory}/test_modeling_{lowercase_model_name}.py' )
        if output_tensorflow:
            if not self._testing:
                remove_copy_lines(f'{directory}/modeling_tf_{lowercase_model_name}.py' )
            shutil.move(
                f'{directory}/modeling_tf_{lowercase_model_name}.py' ,f'{model_dir}/modeling_tf_{lowercase_model_name}.py' ,)
            shutil.move(
                f'{directory}/test_modeling_tf_{lowercase_model_name}.py' ,f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' ,)
        else:
            os.remove(f'{directory}/modeling_tf_{lowercase_model_name}.py' )
            os.remove(f'{directory}/test_modeling_tf_{lowercase_model_name}.py' )
        if output_flax:
            if not self._testing:
                remove_copy_lines(f'{directory}/modeling_flax_{lowercase_model_name}.py' )
            shutil.move(
                f'{directory}/modeling_flax_{lowercase_model_name}.py' ,f'{model_dir}/modeling_flax_{lowercase_model_name}.py' ,)
            shutil.move(
                f'{directory}/test_modeling_flax_{lowercase_model_name}.py' ,f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' ,)
        else:
            os.remove(f'{directory}/modeling_flax_{lowercase_model_name}.py' )
            os.remove(f'{directory}/test_modeling_flax_{lowercase_model_name}.py' )
        shutil.move(
            f'{directory}/{lowercase_model_name}.md' ,f'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' ,)
        shutil.move(
            f'{directory}/tokenization_{lowercase_model_name}.py' ,f'{model_dir}/tokenization_{lowercase_model_name}.py' ,)
        shutil.move(
            f'{directory}/tokenization_fast_{lowercase_model_name}.py' ,f'{model_dir}/tokenization_{lowercase_model_name}_fast.py' ,)
        from os import fdopen, remove
        from shutil import copymode, move
        from tempfile import mkstemp
        def replace(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ):
            # Insert `lines_to_copy` into a file just below the marker line,
            # writing via a temp file and preserving permissions.
            # NOTE(review): duplicate placeholder parameters (SyntaxError);
            # expected signature is (original_file, line_to_copy_below, lines_to_copy).
            # Create temp file
            snake_case__ , snake_case__ :Optional[Any] = mkstemp()
            snake_case__ :Optional[Any] = False
            with fdopen(UpperCamelCase ,"w" ) as new_file:
                with open(UpperCamelCase ) as old_file:
                    for line in old_file:
                        new_file.write(UpperCamelCase )
                        if line_to_copy_below in line:
                            snake_case__ :Optional[Any] = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(UpperCamelCase )
            if not line_found:
                raise ValueError(f'Line {line_to_copy_below} was not found in file.' )
            # Copy the file permissions from the old file to the new file
            copymode(UpperCamelCase ,UpperCamelCase )
            # Remove original file
            remove(UpperCamelCase )
            # Move new file
            move(UpperCamelCase ,UpperCamelCase )
        def skip_units(UpperCamelCase ):
            # A snippet is skipped when it targets a framework not requested.
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )
        def replace_in_files(UpperCamelCase ):
            # Parse a `to_replace_*.py` directive file: each `# To replace in:`
            # / `# Below:` / `# End.` section copies the collected lines into
            # the named target file, then the directive file is deleted.
            with open(UpperCamelCase ) as datafile:
                snake_case__ :int = []
                snake_case__ :Optional[int] = False
                snake_case__ :List[str] = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        snake_case__ :Optional[Any] = line.split("\"" )[1]
                        snake_case__ :Tuple = skip_units(UpperCamelCase )
                    elif "# Below: " in line and "##" not in line:
                        snake_case__ :Optional[Any] = line.split("\"" )[1]
                        snake_case__ :List[str] = skip_units(UpperCamelCase )
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
                        snake_case__ :Tuple = []
                    elif "# Replace with" in line and "##" not in line:
                        snake_case__ :Optional[Any] = []
                    elif "##" not in line:
                        lines_to_copy.append(UpperCamelCase )
            remove(UpperCamelCase )
        replace_in_files(f'{directory}/to_replace_{lowercase_model_name}.py' )
        # NOTE(review): presumably `os.rmdir(directory)` — delete the now-empty
        # cookiecutter output folder.
        os.rmdir(UpperCamelCase )
| 57
| 0
|
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
__UpperCAmelCase : str = get_tests_dir("fixtures")
class _snake_case ( unittest.TestCase ):
    """Offline-behaviour tests for WavaVecaFeatureExtractor loading.

    NOTE(review): obfuscation collapsed distinct names — both methods share
    the name ``lowerCAmelCase_`` (the second shadows the first, so unittest
    would only discover one), repeated ``snake_case__`` assignments were
    originally attribute assignments on a single mock response object, and
    ``lowercase__`` below is that (now lost) mock. Restore before running.
    """
    def lowerCAmelCase_ ( self ) -> int:
        # A mock response for an HTTP head request to emulate server down
        snake_case__ :Union[str, Any] = mock.Mock()
        snake_case__ :Union[str, Any] = 500
        snake_case__ :str = {}
        snake_case__ :Dict = HTTPError
        snake_case__ :List[str] = {}
        # Download this model to make sure it's in the cache.
        snake_case__ :List[str] = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request" ,return_value=lowercase__ ) as mock_head:
            snake_case__ :Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
            # This check we did call the fake head request
            mock_head.assert_called()
    def lowerCAmelCase_ ( self ) -> Tuple:
        # This test is for deprecated behavior and can be removed in v5
        # (loading a feature extractor directly from a resolved config URL).
        snake_case__ :List[str] = WavaVecaFeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json" )
@is_staging_test
class _snake_case ( unittest.TestCase ):
    """push_to_hub round-trip tests for feature extractors (staging Hub).

    NOTE(review): obfuscation collapsed distinct names — the classmethods
    were originally ``setUpClass``/``tearDownClass`` (here they share one
    name, so the second shadows the first), ``lowercase__`` stands in for
    several different values (the auth token, the local fixtures directory
    returned by ``get_tests_dir("fixtures")``, and the k/v pair compared in
    the assertion loops). Restore before running.
    """
    @classmethod
    def lowerCAmelCase_ ( cls ) -> Any:
        # Presumably setUpClass: store and persist the staging auth token.
        snake_case__ :List[Any] = TOKEN
        HfFolder.save_token(lowercase__ )
    @classmethod
    def lowerCAmelCase_ ( cls ) -> Optional[Any]:
        # Presumably tearDownClass: best-effort cleanup of the repos the
        # tests may have created on the staging Hub.
        try:
            delete_repo(token=cls._token ,repo_id="test-feature-extractor" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token ,repo_id="valid_org/test-feature-extractor-org" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token ,repo_id="test-dynamic-feature-extractor" )
        except HTTPError:
            pass
    def lowerCAmelCase_ ( self ) -> List[Any]:
        # Round-trip: push under the user namespace, re-load, compare attrs.
        snake_case__ :Tuple = WavaVecaFeatureExtractor.from_pretrained(lowercase__ )
        feature_extractor.push_to_hub("test-feature-extractor" ,use_auth_token=self._token )
        snake_case__ :Optional[int] = WavaVecaFeatureExtractor.from_pretrained(f'{USER}/test-feature-extractor' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(lowercase__ ,getattr(lowercase__ ,lowercase__ ) )
        # Reset repo
        delete_repo(token=self._token ,repo_id="test-feature-extractor" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                lowercase__ ,repo_id="test-feature-extractor" ,push_to_hub=lowercase__ ,use_auth_token=self._token )
        snake_case__ :List[str] = WavaVecaFeatureExtractor.from_pretrained(f'{USER}/test-feature-extractor' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(lowercase__ ,getattr(lowercase__ ,lowercase__ ) )
    def lowerCAmelCase_ ( self ) -> Optional[int]:
        # Same round-trip, but under an organization namespace.
        snake_case__ :Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained(lowercase__ )
        feature_extractor.push_to_hub("valid_org/test-feature-extractor" ,use_auth_token=self._token )
        snake_case__ :Any = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor" )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(lowercase__ ,getattr(lowercase__ ,lowercase__ ) )
        # Reset repo
        delete_repo(token=self._token ,repo_id="valid_org/test-feature-extractor" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                lowercase__ ,repo_id="valid_org/test-feature-extractor-org" ,push_to_hub=lowercase__ ,use_auth_token=self._token )
        snake_case__ :Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org" )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(lowercase__ ,getattr(lowercase__ ,lowercase__ ) )
    def lowerCAmelCase_ ( self ) -> List[Any]:
        # Dynamic (custom-code) feature extractor: push, check auto_map, and
        # re-load through AutoFeatureExtractor with trust_remote_code.
        CustomFeatureExtractor.register_for_auto_class()
        snake_case__ :List[Any] = CustomFeatureExtractor.from_pretrained(lowercase__ )
        feature_extractor.push_to_hub("test-dynamic-feature-extractor" ,use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map ,{"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"} ,)
        snake_case__ :str = AutoFeatureExtractor.from_pretrained(
            f'{USER}/test-dynamic-feature-extractor' ,trust_remote_code=lowercase__ )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__ ,"CustomFeatureExtractor" )
| 704
|
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

# NOTE(review): the original bound every constant to one placeholder name
# (`__UpperCAmelCase`), while the tokenizer class below reads the canonical
# names — restored here so the class attributes resolve.

# File names expected in a HerBERT checkpoint directory.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# Canonical download locations for the pretrained checkpoint files.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

# Maximum input length of the pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 5_14}

# No extra init kwargs for any checkpoint.
PRETRAINED_INIT_CONFIGURATION = {}
class _snake_case ( PreTrainedTokenizerFast ):
    """Fast (rust-backed) tokenizer for HerBERT.

    Special-token layout follows BERT: ``<s> A </s>`` for one sequence and
    ``<s> A </s> B </s>`` for a pair.

    NOTE(review): the original signatures repeated one placeholder parameter
    name (a SyntaxError), all four methods shared a single placeholder name
    (shadowing each other), and the class attributes/base were bound to the
    undefined `_A` — canonical names restored from the module imports and
    the upstream HerBERT tokenizer.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Wrap one or two sequences with the HerBERT special tokens."""
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return segment ids: 0 for the first sequence (with its specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the underlying tokenizer model files; returns the written paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 57
| 0
|
def lowercase_ ( collection ):
    """Sort ``collection`` in place with selection sort and return it.

    NOTE(review): the original read the undefined names ``a_``/``collection``
    and assigned the swap tuple to a throwaway local, so no element was ever
    moved — both restored below.
    """
    length = len(collection)
    for i in range(length - 1):
        # Find the index of the smallest remaining element.
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            # Swap it into position i.
            collection[least], collection[i] = (collection[i], collection[least])
    return collection
if __name__ == "__main__":
    # NOTE(review): the original bound the input to a placeholder name while
    # reading `user_input`, and called an undefined `selection_sort`.
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(lowercase_(unsorted))
| 705
|
def lowercase_ ( p : int ) -> bool:
    """Lucas-Lehmer primality test for the Mersenne number 2**p - 1.

    Returns True when 2**p - 1 is prime (p itself must be prime for the test
    to be meaningful; p == 2 is handled as a special case).

    Raises:
        ValueError: if ``p`` is less than 2.
    """
    # NOTE(review): the original parameter was an unused placeholder while
    # the body read an undefined `p` — the parameter is now `p` itself.
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
    # NOTE(review): the Lucas-Lehmer function in this file was renamed to
    # `lowercase_`; the original called the undefined `lucas_lehmer_test`.
    print(lowercase_(7))
    print(lowercase_(1_1))
| 57
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase : Tuple = logging.get_logger(__name__)
__UpperCAmelCase : Optional[Any] = {
"""uw-madison/mra-base-512-4""": """https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json""",
}
class _snake_case ( PretrainedConfig ):
    """Configuration class for an MRA model.

    Defaults correspond to the uw-madison/mra-base-512-4 checkpoint.

    NOTE(review): the original signature repeated a single placeholder name
    for every parameter (a SyntaxError) and inherited from the undefined
    name `_A`; parameter names below are reconstructed from the attribute
    assignments in the body, and the base class from the module imports.
    """

    model_type = "mra"

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ) -> Any:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        # MRA-specific approximation hyper-parameters.
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
| 706
|
from typing import Any
def lowercase_ (
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """Viterbi algorithm: most likely hidden-state sequence for the observations.

    NOTE(review): the original signature repeated one placeholder parameter
    name five times (a SyntaxError), and every write into the `probabilities`
    / `pointers` tables was collapsed into an assignment to a throwaway
    local — both restored below from the body's reads.
    """
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
    # The final observation
    final_observation = observations_space[len(observations_space) - 1]
    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Validate all Viterbi inputs; raise ValueError on the first problem.

    Renamed from the garbled `lowercase_` to match the call site in the
    Viterbi function (`_validation(...)`).
    """
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)
def lowercase_ ( __snake_case : Any , __snake_case : Any , __snake_case : Any , __snake_case : Any , __snake_case : Any , ) -> None:
'''simple docstring'''
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError("There's an empty parameter" )
def _validate_lists(observations_space: Any, states_space: Any) -> None:
    """Check that both spaces are lists of strings (delegates to _validate_list)."""
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")
def lowercase_ ( __snake_case : Any , __snake_case : str ) -> None:
'''simple docstring'''
if not isinstance(_object , __snake_case ):
snake_case__ :Optional[int] = F'{var_name} must be a list'
raise ValueError(__snake_case )
else:
for x in _object:
if not isinstance(__snake_case , __snake_case ):
snake_case__ :Any = F'{var_name} must be a list of strings'
raise ValueError(__snake_case )
def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Validate the three probability tables: a flat float dict and two nested ones."""
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")
def _validate_nested_dict(_object: Any, var_name: str) -> None:
    """Check `_object` is a dict of dicts whose inner values are all floats."""
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)
def lowercase_ ( __snake_case : Any , __snake_case : str , __snake_case : type , __snake_case : bool = False ) -> None:
'''simple docstring'''
if not isinstance(_object , __snake_case ):
snake_case__ :str = F'{var_name} must be a dict'
raise ValueError(__snake_case )
if not all(isinstance(__snake_case , __snake_case ) for x in _object ):
snake_case__ :List[Any] = F'{var_name} all keys must be strings'
raise ValueError(__snake_case )
if not all(isinstance(__snake_case , __snake_case ) for x in _object.values() ):
snake_case__ :Optional[int] = "nested dictionary " if nested else ""
snake_case__ :int = F'{var_name} {nested_text}all values must be {value_type.__name__}'
raise ValueError(__snake_case )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 57
| 0
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Map of checkpoint name -> config URL; `logger` is read by the config
# classes below, so it must carry this name (the garbled `__UpperCAmelCase`
# binding left `logger` undefined).
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig(PretrainedConfig):
    """Configuration for the AltCLIP text encoder (XLM-R style).

    Stores hyper-parameters used to instantiate the text model; all
    defaults mirror the BAAI/AltCLIP checkpoint.
    """

    model_type = "altclip_text_model"

    def __init__(
        self,
        vocab_size=250_002,
        hidden_size=1_024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4_096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        # Dimension of the text projection head on top of the pooled output.
        self.project_dim = project_dim
class AltCLIPVisionConfig(PretrainedConfig):
    """Configuration for the AltCLIP vision encoder (CLIP ViT style)."""

    model_type = "altclip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3_072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Load this vision config, unwrapping it from a full AltCLIP config if needed."""
        cls._set_token_in_kwargs(kwargs)

        # get_config_dict returns (config_dict, remaining_kwargs) — both are needed.
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.'
            )

        return cls.from_dict(config_dict, **kwargs)
class AltCLIPConfig(PretrainedConfig):
    """Joint AltCLIP configuration holding a text and a vision sub-config."""

    model_type = "altclip"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs) -> None:
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.")

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        """Alternate constructor from two already-instantiated sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize, expanding nested sub-configs into plain dicts."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 707
|
def lowercase_(__snake_case: str) -> list:
    """Return one copy of the string per alphabetic character, with that
    single character uppercased; non-alphabetic positions are skipped.
    """
    variants = []
    for index, char in enumerate(__snake_case):
        if char.isalpha():
            variants.append(__snake_case[:index] + char.upper() + __snake_case[index + 1 :])
    return variants
if __name__ == "__main__":
__import__("doctest").testmod()
| 57
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

# Checkpoint -> config URL map (the garbled `__UpperCAmelCase` bindings
# left both names unusable by the rest of the module).
BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class BitConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration for a BiT (Big Transfer) ResNet-v2 backbone."""

    model_type = "bit"
    # The body below reads these via `self.layer_types` / `self.supported_padding`.
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1_024, 2_048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f'layer_type={layer_type} is not one of {",".join(self.layer_types)}')
        if global_padding is not None:
            # Normalize the padding strategy to upper case, or reject it.
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f'Padding strategy {global_padding} not supported')

        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor

        # One stage name per entry in `depths`, plus the stem.
        self.stage_names = ["stem"] + [f'stage{idx}' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 708
|
def solution(n: int = 10_00) -> int:
    """Return the sum of all multiples of 3 or 5 below `n` (Project Euler 1).

    The original loop carried an unreachable `elif a % 15 == 0: result -= a`
    branch — every multiple of 15 already satisfies `a % 3 == 0`, so the
    subtraction could never run; it is dropped here. The function is named
    `solution` to match the call at module bottom (`print(f"{solution() = }")`).
    """
    return sum(a for a in range(n) if a % 3 == 0 or a % 5 == 0)
if __name__ == "__main__":
print(F'''{solution() = }''')
| 57
| 0
|
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    """Builds small RobertaPreLayerNorm configs and random inputs for the tests.

    Named to match the reference in the model-test class's `setUp`.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Random encoder inputs plus a matching (non-decoder) config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Same as above but packed into the dict shape the common tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        """Decoder variant: flips is_decoder and adds encoder hidden states/mask."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Runs the shared Flax model test-suite over every RobertaPreLayerNorm head."""

    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self) -> None:
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self) -> None:
        # Smoke-test: every head class loads the PT checkpoint and runs a forward pass.
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    """Slow integration checks against reference logits of the released checkpoint."""

    @slow
    def test_inference_masked_lm(self) -> None:
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        # NOTE(review): dtypes were garbled in the source (`jnp.intaa`/`np.floataa`);
        # restored as int32/float32 per the upstream test — confirm against transformers.
        input_ids = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50_265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self) -> None:
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 709
|
import os
import sys
import unittest
# `git_repo_path` is read on the next two lines, so it must carry this name
# (the garbled `__UpperCAmelCase` binding left it undefined).
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402

# Align TRANSFORMERS_PATH in check_dummies with the current path
# NOTE(review): the original assignment target was lost in garbling; this
# patches the diffusers source path into check_dummies — confirm the exact
# attribute name against utils/check_dummies.py.
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")
class CheckDummiesTester(unittest.TestCase):
    """Unit tests for the `check_dummies` utility helpers."""

    def test_find_backend(self) -> None:
        simple_backend = find_backend(" if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend(" if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self) -> None:
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self) -> None:
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        # The generator emits 4-space indented bodies; the garbled source had
        # collapsed the indentation inside these expected strings.
        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = (
            "\nclass FakeClass(metaclass=DummyObject):\n"
            "    _backends = 'torch'\n\n"
            "    def __init__(self, *args, **kwargs):\n"
            "        requires_backends(self, 'torch')\n\n"
            "    @classmethod\n"
            "    def from_config(cls, *args, **kwargs):\n"
            "        requires_backends(cls, 'torch')\n\n"
            "    @classmethod\n"
            "    def from_pretrained(cls, *args, **kwargs):\n"
            "        requires_backends(cls, 'torch')\n"
        )
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self) -> None:
        expected_dummy_pytorch_file = (
            "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
            "from ..utils import DummyObject, requires_backends\n\n\n"
            "CONSTANT = None\n\n\n"
            "def function(*args, **kwargs):\n"
            "    requires_backends(function, [\"torch\"])\n\n\n"
            "class FakeClass(metaclass=DummyObject):\n"
            "    _backends = [\"torch\"]\n\n"
            "    def __init__(self, *args, **kwargs):\n"
            "        requires_backends(self, [\"torch\"])\n\n"
            "    @classmethod\n"
            "    def from_config(cls, *args, **kwargs):\n"
            "        requires_backends(cls, [\"torch\"])\n\n"
            "    @classmethod\n"
            "    def from_pretrained(cls, *args, **kwargs):\n"
            "        requires_backends(cls, [\"torch\"])\n"
        )
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 57
| 0
|
from math import ceil
def assert_device_map(device_map: dict, num_blocks: int) -> None:
    """Validate that `device_map` covers blocks 0..num_blocks-1 exactly once.

    Args:
        device_map: device -> list of attention-block indices.
        num_blocks: number of attention blocks the model actually has.

    Raises:
        ValueError: on duplicated, missing, or extra block indices.
    """
    blocks = list(range(0, num_blocks))
    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)

    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )
def get_device_map(n_layers: int, devices: list) -> dict:
    """Evenly partition layer indices 0..n_layers-1 across `devices`.

    Earlier devices receive ceil(n_layers / len(devices)) layers, so any
    remainder shrinks only the last device's share.
    """
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]
    return dict(zip(devices, layers_list))
| 710
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# Lazy-import structure: the tokenizer is only registered when sentencepiece
# is installed. `_import_structure` is the name consumed by `_LazyModule`
# below, so the assignments must target it (the garbled `__UpperCAmelCase`
# bindings left it undefined).
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 57
| 0
|
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    """Holds sizing parameters and builds the kwargs dict for BeitImageProcessor tests.

    Named to match its construction in the image-processing test's setUp.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],  # shared mutable default kept from upstream; read-only here
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ) -> None:
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
        """Return the kwargs used to instantiate a BeitImageProcessor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }
def prepare_semantic_single_inputs():
    """Load one (image, segmentation map) pair from the ADE20k test fixtures.

    Named to match its call site in the batched-segmentation test below.
    """
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])

    return image, map
def prepare_semantic_batch_inputs():
    """Load two (image, segmentation map) pairs from the ADE20k test fixtures."""
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])

    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class _snake_case ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_A = BeitImageProcessor if is_vision_available() else None
def lowerCAmelCase_ ( self ) -> Dict:
snake_case__ :str = BeitImageProcessingTester(self )
@property
def lowerCAmelCase_ ( self ) -> Dict:
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
snake_case__ :str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase__ ,"do_resize" ) )
self.assertTrue(hasattr(UpperCamelCase__ ,"size" ) )
self.assertTrue(hasattr(UpperCamelCase__ ,"do_center_crop" ) )
self.assertTrue(hasattr(UpperCamelCase__ ,"center_crop" ) )
self.assertTrue(hasattr(UpperCamelCase__ ,"do_normalize" ) )
self.assertTrue(hasattr(UpperCamelCase__ ,"image_mean" ) )
self.assertTrue(hasattr(UpperCamelCase__ ,"image_std" ) )
def lowerCAmelCase_ ( self ) -> Any:
snake_case__ :Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{"height": 20, "width": 20} )
self.assertEqual(image_processor.crop_size ,{"height": 18, "width": 18} )
self.assertEqual(image_processor.do_reduce_labels ,UpperCamelCase__ )
snake_case__ :Optional[Any] = self.image_processing_class.from_dict(
self.image_processor_dict ,size=42 ,crop_size=84 ,reduce_labels=UpperCamelCase__ )
self.assertEqual(image_processor.size ,{"height": 42, "width": 42} )
self.assertEqual(image_processor.crop_size ,{"height": 84, "width": 84} )
self.assertEqual(image_processor.do_reduce_labels ,UpperCamelCase__ )
def lowerCAmelCase_ ( self ) -> Tuple:
pass
def lowerCAmelCase_ ( self ) -> List[str]:
# Initialize image_processing
snake_case__ :Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ :Any = prepare_image_inputs(self.image_processor_tester ,equal_resolution=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ ,Image.Image )
# Test not batched input
snake_case__ :Any = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
snake_case__ :List[Any] = image_processing(UpperCamelCase__ ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
def lowerCAmelCase_ ( self ) -> Dict:
# Initialize image_processing
snake_case__ :str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ :Dict = prepare_image_inputs(self.image_processor_tester ,equal_resolution=UpperCamelCase__ ,numpify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ ,np.ndarray )
# Test not batched input
snake_case__ :List[Any] = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
snake_case__ :Dict = image_processing(UpperCamelCase__ ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
def lowerCAmelCase_ ( self ) -> str:
# Initialize image_processing
snake_case__ :Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ :Union[str, Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=UpperCamelCase__ ,torchify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ ,torch.Tensor )
# Test not batched input
snake_case__ :Tuple = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
snake_case__ :Tuple = image_processing(UpperCamelCase__ ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
def lowerCAmelCase_ ( self ) -> Optional[Any]:
# Initialize image_processing
snake_case__ :List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ :List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=UpperCamelCase__ ,torchify=UpperCamelCase__ )
snake_case__ :int = []
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ ,torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
snake_case__ :Tuple = image_processing(image_inputs[0] ,maps[0] ,return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(
encoding["labels"].shape ,(
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(encoding["labels"].dtype ,torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
# Test batched
snake_case__ :Any = image_processing(UpperCamelCase__ ,UpperCamelCase__ ,return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(
encoding["labels"].shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(encoding["labels"].dtype ,torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
# Test not batched input (PIL images)
snake_case__ :Optional[Any] = prepare_semantic_single_inputs()
snake_case__ :Dict = image_processing(UpperCamelCase__ ,UpperCamelCase__ ,return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(
encoding["labels"].shape ,(
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(encoding["labels"].dtype ,torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
# Test batched input (PIL images)
snake_case__ :List[str] = prepare_semantic_batch_inputs()
snake_case__ :List[str] = image_processing(UpperCamelCase__ ,UpperCamelCase__ ,return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape ,(
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(
encoding["labels"].shape ,(
2,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(encoding["labels"].dtype ,torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
def lowerCAmelCase_ ( self ) -> List[str]:
# Initialize image_processing
snake_case__ :Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
snake_case__ :List[Any] = prepare_semantic_single_inputs()
snake_case__ :int = image_processing(UpperCamelCase__ ,UpperCamelCase__ ,return_tensors="pt" )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 150 )
snake_case__ :Optional[int] = True
snake_case__ :str = image_processing(UpperCamelCase__ ,UpperCamelCase__ ,return_tensors="pt" )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
| 711
|
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _snake_case(unittest.TestCase):
    """Offline-cache and legacy-loading behavior of tokenizer ``from_pretrained``."""

    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()

    @require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_missing_files(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = GPTaTokenizerFast.from_pretrained("gpt2")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = GPTaTokenizerFast.from_pretrained("gpt2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_one_file(self):
        # This test is for deprecated behavior and can be removed in v5
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, "wb") as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", f)
            _ = AlbertTokenizer.from_pretrained(tmp_file)
        finally:
            os.remove(tmp_file)

        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile("tokenizer.json"):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open("tokenizer.json", "wb") as f:
                http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", f)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size, 1_000)
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove("tokenizer.json")

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model")
@is_staging_test
class _snake_case(unittest.TestCase):
    """Push-to-hub round-trips for slow, fast, and custom (dynamic) tokenizers.

    Requires the staging endpoint and a valid token (``is_staging_test``).
    """

    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        # Best-effort cleanup of the repos created by the tests below.
        try:
            delete_repo(token=cls._token, repo_id="test-tokenizer")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-tokenizer-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-tokenizer")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("test-tokenizer", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="test-tokenizer")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id="test-tokenizer", push_to_hub=True, use_auth_token=self._token)

        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    def test_push_to_hub_in_organization(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id="valid_org/test-tokenizer-org", push_to_hub=True, use_auth_token=self._token
            )

        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    @require_tokenizers
    def test_push_to_hub_dynamic_tokenizer(self):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        # No fast custom tokenizer
        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)
        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")

        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)
        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast")
        tokenizer = AutoTokenizer.from_pretrained(
            f"{USER}/test-dynamic-tokenizer", use_fast=False, trust_remote_code=True
        )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")
class _snake_case(unittest.TestCase):
    """Unit tests for the ``Trie`` used by slow tokenizers to split on added tokens."""

    def test_trie(self):
        trie = Trie()
        trie.add("Hello 友達")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}})
        trie.add("Hello")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}})

    def test_trie_split(self):
        trie = Trie()
        # Nothing added yet: the text comes back in one piece.
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"])
        trie.add("[CLS]")
        trie.add("extra_id_1")
        trie.add("extra_id_100")
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"])

    def test_trie_single(self):
        trie = Trie()
        trie.add("A")
        self.assertEqual(trie.split("ABC"), ["A", "BC"])
        self.assertEqual(trie.split("BCA"), ["BC", "A"])

    def test_trie_final(self):
        trie = Trie()
        trie.add("TOKEN]")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_subsplit(self):
        trie = Trie()
        trie.add("A")
        trie.add("P")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_suffix_tokens(self):
        trie = Trie()
        trie.add("AB")
        trie.add("B")
        trie.add("C")
        self.assertEqual(trie.split("ABC"), ["AB", "C"])

    def test_trie_skip(self):
        trie = Trie()
        trie.add("ABC")
        trie.add("B")
        trie.add("CD")
        self.assertEqual(trie.split("ABCD"), ["ABC", "D"])

    def test_cut_text_hardening(self):
        # Even if the offsets are wrong, we necessarily output correct string
        # parts.
        trie = Trie()
        parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ["AB", "C"])
| 57
| 0
|
def solution(n: int = 1000) -> int:
    """Return the largest product ``a * b * c`` of a Pythagorean triplet with
    ``a + b + c == n`` (Project Euler style), or ``-1`` if none exists.

    For each candidate ``a`` the equations ``a**2 + b**2 == c**2`` and
    ``a + b + c == n`` are solved for ``b`` (eliminating ``c``); integer
    division means the triplet condition must be re-checked explicitly.
    """
    product = -1
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            # Keep the maximum product seen so far.
            product = max(product, candidate)
    return product


if __name__ == "__main__":
    print(f"{solution() = }")
| 712
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
# Default per-device training batch size and the (larger) evaluation batch size.
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    """Build train/validation dataloaders for GLUE MRPC tokenized with *model_name*'s tokenizer.

    Args:
        accelerator: used only to decide the padding strategy (TPU needs fixed lengths).
        batch_size: batch size for both dataloaders.
        model_name: tokenizer checkpoint to load.

    Returns:
        ``(train_dataloader, eval_dataloader)``.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    """Run evaluation and return the accuracy computed by *metric*.

    De-duplicates the gathered predictions of the last batch in distributed
    settings, where samplers pad to make batches divisible across processes.
    """
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                # Drop the duplicated tail samples of the final batch.
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions,
            references=references,
        )

    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
    """Train GLUE/MRPC with optional DeepSpeed, checkpointing per epoch.

    Saves accelerator state plus a ``state_{epoch}.json`` after every epoch;
    when ``args.resume_from_checkpoint`` is set, restores the state and
    verifies accuracy / learning rates against the saved JSON, then returns.
    """
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name_or_path = args.model_name_or_path
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)

    # Instantiate optimizer: DeepSpeed may supply its own optimizer via config.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler (DeepSpeed may supply its own via config).
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch-1}.json"), "r") as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1

        output_dir = f"epoch_{epoch}"
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)

        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)
def main():
    """Parse command-line arguments and launch :func:`training_function`."""
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 57
| 0
|
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = "."

# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
    "Assert",
    "AssignVariableOp",
    "EmptyTensorList",
    "MergeV2Checkpoints",
    "ReadVariableOp",
    "ResourceGather",
    "RestoreV2",
    "SaveV2",
    "ShardedFilename",
    "StatefulPartitionedCall",
    "StaticRegexFullMatch",
    "VarHandleOp",
]
def onnx_compliancy(saved_model_path, strict, opset):
    """Check that every op of a TF SavedModel is supported by the given ONNX opset.

    Args:
        saved_model_path: path to the saved model ``.pb`` file.
        strict: if True, raise on incompatible ops instead of printing them.
        opset: highest ONNX opset version whose supported ops are collected.

    Raises:
        Exception: in strict mode, when incompatible ops are found.
    """
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        # Join the list: concatenating a list to a str would raise TypeError.
        raise Exception(f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
    parser.add_argument(
        "--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
    )
    parser.add_argument(
        "--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
    )
    parser.add_argument(
        "--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
    )
    args = parser.parse_args()

    if args.framework == "onnx":
        onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 713
|
from __future__ import annotations
class Node:
    """A binary-tree node holding a value and optional left/right children."""

    def __init__(self, data) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None
def display(tree: Node | None) -> None:  # In Order traversal of the tree
    """Print the node values of *tree* in in-order sequence, one per line."""
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)
def depth_of_tree(tree: Node | None) -> int:
    """Return the height of *tree* in nodes (0 for an empty tree)."""
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0
def is_full_binary_tree(tree: Node) -> bool:
    """Return True if every node of *tree* has either zero or two children."""
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        # A node with exactly one child makes the tree non-full.
        return not tree.left and not tree.right
def main() -> None:  # Main function for testing.
    """Build a small sample tree and exercise the helpers above."""
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)


if __name__ == "__main__":
    main()
| 57
| 0
|
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    """Parse an alternating ``--key value`` token list into a dict, stripping leading dashes from keys."""
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
def main():
    """Entry point of the ``datasets-cli`` tool: register subcommands, parse args, run."""
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()


if __name__ == "__main__":
    main()
| 714
|
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

# Files whose names contain uppercase characters.
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

# Files whose names contain spaces.
space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

# Files whose names contain hyphens.
hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

# Files sitting at the repository root instead of inside a directory.
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

# Exit with a non-zero status equal to the number of offending files.
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
| 57
| 0
|
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Map of OWL-ViT checkpoint names to their hosted config files.
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json",
    "google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json",
    "google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json",
}
class _snake_case ( __UpperCAmelCase ):
    """Configuration for the OWL-ViT *text* encoder (model_type ``owlvit_text_model``).

    NOTE(review): identifiers in this block are machine-mangled — every
    ``__init__`` parameter shares one name (duplicate parameter names are a
    SyntaxError) and the body reads names the signature never binds.  The
    intended base class is presumably ``PretrainedConfig`` (imported above) —
    confirm before relying on this class.
    """

    _A = '''owlvit_text_model'''  # HF ``model_type`` identifier

    # Defaults match the published OWL-ViT text tower (49_408-token vocab,
    # 512-dim hidden, 2_048-dim FFN, 12 layers, 8 heads, 16 positions).
    def __init__( self ,UpperCamelCase=49_408 ,UpperCamelCase=512 ,UpperCamelCase=2_048 ,UpperCamelCase=12 ,UpperCamelCase=8 ,UpperCamelCase=16 ,UpperCamelCase="quick_gelu" ,UpperCamelCase=1E-5 ,UpperCamelCase=0.0 ,UpperCamelCase=0.02 ,UpperCamelCase=1.0 ,UpperCamelCase=0 ,UpperCamelCase=49_406 ,UpperCamelCase=49_407 ,**UpperCamelCase ,) -> List[str]:
        # Special-token ids and any extra kwargs go to the base config.
        super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE ,bos_token_id=__SCREAMING_SNAKE_CASE ,eos_token_id=__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
        # Store the architecture hyper-parameters on the instance.
        # NOTE(review): the right-hand names below are what the mangled
        # signature *should* bind — they are currently unbound.
        snake_case__ :Optional[int] = vocab_size
        snake_case__ :Tuple = hidden_size
        snake_case__ :Union[str, Any] = intermediate_size
        snake_case__ :int = num_hidden_layers
        snake_case__ :Dict = num_attention_heads
        snake_case__ :Tuple = max_position_embeddings
        snake_case__ :Any = hidden_act
        snake_case__ :Optional[Any] = layer_norm_eps
        snake_case__ :List[Any] = attention_dropout
        snake_case__ :Optional[int] = initializer_range
        snake_case__ :str = initializer_factor

    @classmethod
    def lowerCAmelCase_ ( cls ,UpperCamelCase ,**UpperCamelCase ) -> Optional[Any]:
        """``from_pretrained``-style loader: fetch a config dict, extract the
        text sub-config when loading from a composite OwlViT config."""
        cls._set_token_in_kwargs(__SCREAMING_SNAKE_CASE )
        snake_case__ , snake_case__ :Tuple = cls.get_config_dict(__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type" ) == "owlvit":
            snake_case__ :Optional[Any] = config_dict["text_config"]
        # Warn when the stored model_type does not match this class.
        if "model_type" in config_dict and hasattr(cls ,"model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
class _snake_case ( __UpperCAmelCase ):
    """Configuration for the OWL-ViT *vision* encoder (model_type
    ``owlvit_vision_model``).

    NOTE(review): mangled like its sibling above — duplicated ``__init__``
    parameter names (a SyntaxError) and unbound right-hand names in the body;
    the intended base is presumably ``PretrainedConfig``.
    """

    _A = '''owlvit_vision_model'''  # HF ``model_type`` identifier

    # Defaults match the published OWL-ViT vision tower (768-dim hidden,
    # 3_072-dim FFN, 12 layers/heads, 768px images, 32px patches).
    def __init__( self ,UpperCamelCase=768 ,UpperCamelCase=3_072 ,UpperCamelCase=12 ,UpperCamelCase=12 ,UpperCamelCase=3 ,UpperCamelCase=768 ,UpperCamelCase=32 ,UpperCamelCase="quick_gelu" ,UpperCamelCase=1E-5 ,UpperCamelCase=0.0 ,UpperCamelCase=0.02 ,UpperCamelCase=1.0 ,**UpperCamelCase ,) -> str:
        super().__init__(**__SCREAMING_SNAKE_CASE )
        # Architecture hyper-parameters (right-hand names are the intended,
        # currently unbound, parameter names).
        snake_case__ :Dict = hidden_size
        snake_case__ :List[str] = intermediate_size
        snake_case__ :int = num_hidden_layers
        snake_case__ :Dict = num_attention_heads
        snake_case__ :Union[str, Any] = num_channels
        snake_case__ :str = image_size
        snake_case__ :Union[str, Any] = patch_size
        snake_case__ :Optional[Any] = hidden_act
        snake_case__ :int = layer_norm_eps
        snake_case__ :Tuple = attention_dropout
        snake_case__ :str = initializer_range
        snake_case__ :str = initializer_factor

    @classmethod
    def lowerCAmelCase_ ( cls ,UpperCamelCase ,**UpperCamelCase ) -> List[Any]:
        """``from_pretrained``-style loader: fetch a config dict, extract the
        vision sub-config when loading from a composite OwlViT config."""
        cls._set_token_in_kwargs(__SCREAMING_SNAKE_CASE )
        snake_case__ , snake_case__ :int = cls.get_config_dict(__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type" ) == "owlvit":
            snake_case__ :List[Any] = config_dict["vision_config"]
        # Warn when the stored model_type does not match this class.
        if "model_type" in config_dict and hasattr(cls ,"model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
class _snake_case ( __UpperCAmelCase ):
    """Composite OWL-ViT configuration holding a text and a vision sub-config.

    NOTE(review): mangled — the two ``_A`` class attributes overwrite each
    other (presumably ``model_type = "owlvit"`` and ``is_composition = True``),
    the three methods below share one name so only the last binding survives
    (intended: ``from_pretrained``, ``from_text_vision_configs``, ``to_dict``),
    and the ``__init__`` parameters are unbound in the body.
    """

    _A = '''owlvit'''
    _A = True

    def __init__( self ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase=512 ,UpperCamelCase=2.6592 ,UpperCamelCase=True ,**UpperCamelCase ,) -> str:
        super().__init__(**__SCREAMING_SNAKE_CASE )
        # Fall back to default sub-configs when none are supplied.
        if text_config is None:
            snake_case__ :List[str] = {}
            logger.info("text_config is None. Initializing the OwlViTTextConfig with default values." )
        if vision_config is None:
            snake_case__ :Any = {}
            logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values." )
        # Instantiate the nested sub-configs and projection hyper-parameters.
        snake_case__ :int = OwlViTTextConfig(**__SCREAMING_SNAKE_CASE )
        snake_case__ :List[str] = OwlViTVisionConfig(**__SCREAMING_SNAKE_CASE )
        snake_case__ :Any = projection_dim
        snake_case__ :List[Any] = logit_scale_init_value
        snake_case__ :Optional[int] = return_dict
        snake_case__ :List[Any] = 1.0  # NOTE(review): presumably initializer_factor — confirm

    @classmethod
    def lowerCAmelCase_ ( cls ,UpperCamelCase ,**UpperCamelCase ) -> int:
        """``from_pretrained``-style loader for the composite config."""
        cls._set_token_in_kwargs(__SCREAMING_SNAKE_CASE )
        snake_case__ , snake_case__ :Any = cls.get_config_dict(__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
        if "model_type" in config_dict and hasattr(cls ,"model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )

    @classmethod
    def lowerCAmelCase_ ( cls ,UpperCamelCase ,UpperCamelCase ,**UpperCamelCase ) -> Dict:
        """Build a composite config from separate text/vision config dicts
        (``from_text_vision_configs``-style)."""
        snake_case__ :Optional[int] = {}
        snake_case__ :Optional[Any] = text_config
        snake_case__ :Optional[int] = vision_config
        return cls.from_dict(__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )

    def lowerCAmelCase_ ( self ) -> Dict:
        """Serialise to a plain dict, expanding the nested sub-configs
        (``to_dict``-style)."""
        snake_case__ :Union[str, Any] = copy.deepcopy(self.__dict__ )
        snake_case__ :str = self.text_config.to_dict()
        snake_case__ :Tuple = self.vision_config.to_dict()
        snake_case__ :List[str] = self.__class__.model_type
        return output
class _snake_case ( __UpperCAmelCase ):
    """ONNX export configuration for OWL-ViT.

    NOTE(review): mangled — the five members below all share the name
    ``lowerCAmelCase_`` (intended: ``inputs``, ``outputs``,
    ``atol_for_validation``, ``generate_dummy_inputs``,
    ``default_onnx_opset``); only the last binding survives.  The intended
    base is presumably the ``OnnxConfig`` imported above.
    """

    @property
    def lowerCAmelCase_ ( self ) -> List[str]:
        # Input tensors with their dynamic axes.
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ] )

    @property
    def lowerCAmelCase_ ( self ) -> List[str]:
        # Output tensors with their dynamic axes.
        return OrderedDict(
            [
                ("logits_per_image", {0: "batch"}),
                ("logits_per_text", {0: "batch"}),
                ("text_embeds", {0: "batch"}),
                ("image_embeds", {0: "batch"}),
            ] )

    @property
    def lowerCAmelCase_ ( self ) -> str:
        # Absolute tolerance used when validating the exported model.
        return 1E-4

    def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = -1 ,UpperCamelCase = -1 ,UpperCamelCase = None ,) -> Optional[int]:
        # Dummy text inputs via the processor's tokenizer plus dummy image
        # inputs via its image processor, merged into one feed dict.
        snake_case__ :str = super().generate_dummy_inputs(
            processor.tokenizer ,batch_size=__SCREAMING_SNAKE_CASE ,seq_length=__SCREAMING_SNAKE_CASE ,framework=__SCREAMING_SNAKE_CASE )
        snake_case__ :str = super().generate_dummy_inputs(
            processor.image_processor ,batch_size=__SCREAMING_SNAKE_CASE ,framework=__SCREAMING_SNAKE_CASE )
        return {**text_input_dict, **image_input_dict}

    @property
    def lowerCAmelCase_ ( self ) -> Optional[int]:
        # Minimum ONNX opset supporting the required operators.
        return 14
| 715
|
def lowercase_ ( __snake_case : Tuple , __snake_case : Optional[int] ) -> List[Any]:
'''simple docstring'''
snake_case__ :Dict = ""
for i in table:
res += inp[i - 1]
return res
def lowercase_ ( data : str ) -> str:
    """Rotate *data* left by one position (first element moves to the end)."""
    # Restored parameter name: the body reads ``data`` but the mangled
    # signature never bound it.
    return data[1:] + data[0]
def lowercase_ ( a : str , b : str ) -> str:
    """Bitwise XOR of two equal-length bit strings, returned as a bit string.

    "0" where the corresponding characters agree, "1" where they differ —
    the same truth table as the original if/else chain.
    """
    # Restored parameter names (the body compared a[i] with b[i]).
    return "".join("0" if x == y else "1" for x, y in zip(a, b))
def lowercase_ ( s : list , data : str ) -> str:
    """Look up a 4-bit string in the S-box *s*; return the entry as bits.

    The row is selected by the outer bits (``data[0]`` + ``data[-1]``), the
    column by the two middle bits (``data[1:3]``).  The table value comes
    back via ``bin()`` without the "0b" prefix, i.e. 1-2 characters and NOT
    zero-padded — callers pad to 2 bits themselves.
    """
    # Restored parameter names from the body (the mangled signature
    # duplicated one name, which is a SyntaxError).
    row = int("0b" + data[0] + data[-1] , 2 )
    col = int("0b" + data[1:3] , 2 )
    return bin(s[row][col] )[2:]
def lowercase_ ( expansion , sa , sb , key , message ) -> str:
    """One Feistel round of simplified DES (S-DES).

    The right half of the 8-bit *message* is expanded/permuted with
    *expansion*, XOR-ed with the round *key*, pushed through the two S-boxes
    *sa*/*sb*, permuted with the module-level P4 table and XOR-ed into the
    left half.  Returns new-left + unchanged-right.

    NOTE(review): ``apply_table``/``xor``/``apply_sbox`` and ``p4_table`` are
    defined elsewhere in this module under mangled names — confirm bindings.
    """
    left = message[:4]
    right = message[4:]
    temp = apply_table(right , expansion )
    temp = xor(temp , key )
    left_sbox = apply_sbox(sa , temp[:4] )
    right_sbox = apply_sbox(sb , temp[4:] )
    # S-box outputs are 1-2 bits; left-pad each to exactly 2 bits.
    left_sbox = "0" * (2 - len(left_sbox )) + left_sbox
    right_sbox = "0" * (2 - len(right_sbox )) + right_sbox
    temp = apply_table(left_sbox + right_sbox , p4_table )
    temp = xor(left , temp )
    return temp + right
if __name__ == "__main__":
    # NOTE(review): the original block bound every constant to one reused
    # mangled identifier, so later reads (key, message, the tables, ...) were
    # undefined, and the two S-boxes / round keys had been collapsed into one
    # name.  Names below are reconstructed from the S-DES algorithm; the
    # helper functions are defined above under mangled names — confirm.
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    # Standard simplified-DES permutation tables.
    pa_table = [6, 3, 7, 4, 8, 5, 10, 9]  # P8: 10-bit key -> 8-bit round key
    paa_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]  # P10: initial key permutation
    p4_table = [2, 4, 3, 1]  # P4: used inside the round function
    IP = [2, 6, 3, 1, 4, 8, 5, 7]  # initial permutation
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]  # inverse initial permutation
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]  # expansion/permutation of the right half
    sa = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]  # S0
    sb = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]  # S1

    # Key generation: P10, split, rotate once, P8 -> key1; rotate twice more
    # on each half, P8 -> key2.
    temp = apply_table(key, paa_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    keya = apply_table(left + right, pa_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    keyb = apply_table(left + right, pa_table)

    # Encryption: IP, round with key1, swap halves, round with key2, IP^-1.
    temp = apply_table(message, IP)
    temp = function(expansion, sa, sb, keya, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, sa, sb, keyb, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # Decryption: same structure with the round keys in reverse order.
    temp = apply_table(CT, IP)
    temp = function(expansion, sa, sb, keyb, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, sa, sb, keya, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decypting is:", PT)
| 57
| 0
|
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class _snake_case :
_A = 42
_A = None
_A = None
def lowercase_ ( ) -> Node | None:
    """Build the fixed 5-node demo tree::

            1
           / \\
          2   3
         / \\
        4   5
    """
    # The mangled original created five disconnected nodes and returned an
    # undefined name; the links below restore the intended shape.
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    return tree
def lowercase_ ( root : Node | None ) -> list[int]:
    """Pre-order traversal (root, left subtree, right subtree) as a flat list."""
    # The original recursed through the name ``preorder``, which this mangled
    # module never defines; recursion is routed through the function itself.
    if not root:
        return []
    return [root.data, *lowercase_(root.left ), *lowercase_(root.right )]
def lowercase_ ( root : Node | None ) -> list[int]:
    """Post-order traversal (left subtree, right subtree, root) as a flat list."""
    # Recursion routed through the function's own (mangled) name; the
    # original called an undefined ``postorder``.
    if not root:
        return []
    return lowercase_(root.left ) + lowercase_(root.right ) + [root.data]
def lowercase_ ( root : Node | None ) -> list[int]:
    """In-order traversal (left subtree, root, right subtree) as a flat list."""
    # Recursion routed through the function's own (mangled) name; the
    # original called an undefined ``inorder``.
    if not root:
        return []
    return [*lowercase_(root.left ), root.data, *lowercase_(root.right )]
def lowercase_ ( root : Node | None ) -> int:
    """Height of the tree: 0 for an empty tree, else 1 + the taller child."""
    # Recursion routed through the function's own (mangled) name; the
    # original called an undefined ``height``.
    if not root:
        return 0
    return max(lowercase_(root.left ) , lowercase_(root.right ) ) + 1
def lowercase_ ( root : Node | None ) -> Sequence[Node | None]:
    """Breadth-first (level-order) traversal; returns node payloads in order."""
    output: list[Any] = []
    if root is None:
        return output
    # Classic BFS queue; deque gives O(1) popleft.
    process_queue = deque([root] )
    while process_queue:
        # The mangled original popped into one name but then read ``node``;
        # both now agree.
        node = process_queue.popleft()
        output.append(node.data )
        if node.left:
            process_queue.append(node.left )
        if node.right:
            process_queue.append(node.right )
    return output
def lowercase_ ( root : Node | None , level : int ) -> Sequence[Node | None]:
    """Payloads of all nodes on 1-indexed *level*, left to right.

    The mangled original duplicated both parameter names (a SyntaxError);
    they are restored from the names the bodies read.
    """
    output: list[Any] = []

    def populate_output(root : Node | None , level : int ) -> None:
        # Walk down, decrementing level; emit when the target level is hit.
        if not root:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.left , level - 1 )
            populate_output(root.right , level - 1 )

    populate_output(root , level )
    return output
def lowercase_ ( root : Node | None , level : int ) -> Sequence[Node | None]:
    """Payloads of all nodes on 1-indexed *level*, right to left.

    Mirror of the left-to-right variant above: the right subtree is visited
    first.  Parameter names restored from the names the bodies read.
    """
    output: list[Any] = []

    def populate_output(root : Node | None , level : int ) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            # Right child first gives the reversed per-level ordering.
            populate_output(root.right , level - 1 )
            populate_output(root.left , level - 1 )

    populate_output(root , level )
    return output
def lowercase_ ( root : Node | None ) -> Sequence[Node | None] | list[Any]:
    """Zig-zag level order: level 1 left->right, level 2 right->left, ...

    NOTE(review): ``height`` and the two ``get_nodes_from_*`` helpers are
    defined above under mangled names — confirm the bindings.
    """
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0  # 0 -> emit left-to-right next, 1 -> right-to-left next
    height_tree = height(root )
    for h in range(1 , height_tree + 1 ):
        if not flag:
            output.append(get_nodes_from_left_to_right(root , h ) )
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root , h ) )
            flag = 0
    return output
def lowercase_ ( ) -> None:  # Main function for testing.
    """Exercise every traversal on the fixed demo tree and print the results."""
    # The mangled original built the tree into one name and then read
    # another; ``root`` is used consistently here.  The helper functions are
    # defined above under mangled names — confirm the bindings.
    root = make_tree()
    print(f'In-order Traversal: {inorder(root )}' )
    print(f'Pre-order Traversal: {preorder(root )}' )
    print(f'Post-order Traversal: {postorder(root )}' , "\n" )
    print(f'Height of Tree: {height(root )}' , "\n" )
    print("Complete Level Order Traversal: " )
    print(level_order(root ) , "\n" )
    print("Level-wise order Traversal: " )
    for level in range(1 , height(root ) + 1 ):
        print(f'Level {level}:' , get_nodes_from_left_to_right(root , level=level ) )
    print("\nZigZag order Traversal: " )
    print(zigzag(root ) )
# Run the doctests, then the demo driver, when executed as a script.
# NOTE(review): ``main`` is presumably the (mangled) driver defined above —
# confirm the binding.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 716
|
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _snake_case ( _A , _A , _A ):
    """T5-style notes encoder for spectrogram diffusion: token + position
    embeddings, a stack of ``TaBlock`` encoder layers, then a final layer
    norm and dropout.

    NOTE(review): this block is machine-mangled — the base-class list repeats
    ``_A`` and all ten ``__init__`` parameters share one name (a SyntaxError).
    Restore the real names before use; hedged guesses are marked below.
    """

    @register_to_config
    def __init__( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = False ,) -> int:
        super().__init__()
        # Token and position embedding tables.
        snake_case__ :Union[str, Any] = nn.Embedding(UpperCamelCase ,UpperCamelCase )
        snake_case__ :int = nn.Embedding(UpperCamelCase ,UpperCamelCase )
        # NOTE(review): a bare ``False`` — presumably freezing the position
        # embedding (``...weight.requires_grad = False``); confirm.
        snake_case__ :Any = False
        snake_case__ :List[Any] = nn.Dropout(p=UpperCamelCase )
        # T5 configuration shared by every encoder block.
        snake_case__ :Tuple = TaConfig(
            vocab_size=UpperCamelCase ,d_model=UpperCamelCase ,num_heads=UpperCamelCase ,d_kv=UpperCamelCase ,d_ff=UpperCamelCase ,dropout_rate=UpperCamelCase ,feed_forward_proj=UpperCamelCase ,is_decoder=UpperCamelCase ,is_encoder_decoder=UpperCamelCase ,)
        # Encoder stack: one TaBlock per layer.
        snake_case__ :List[str] = nn.ModuleList()
        for lyr_num in range(UpperCamelCase ):
            snake_case__ :List[Any] = TaBlock(UpperCamelCase )
            self.encoders.append(UpperCamelCase )
        snake_case__ :Optional[Any] = TaLayerNorm(UpperCamelCase )
        snake_case__ :Any = nn.Dropout(p=UpperCamelCase )

    def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> int:
        """Embed tokens + positions, run the encoder stack, and return
        ``(hidden_states, encoder_inputs_mask)``."""
        snake_case__ :str = self.token_embedder(UpperCamelCase )
        snake_case__ :int = encoder_input_tokens.shape[1]
        snake_case__ :List[Any] = torch.arange(UpperCamelCase ,device=encoder_input_tokens.device )
        x += self.position_encoding(UpperCamelCase )
        snake_case__ :Optional[int] = self.dropout_pre(UpperCamelCase )
        # inverted the attention mask
        snake_case__ :Optional[Any] = encoder_input_tokens.size()
        snake_case__ :Dict = self.get_extended_attention_mask(UpperCamelCase ,UpperCamelCase )
        for lyr in self.encoders:
            snake_case__ :str = lyr(UpperCamelCase ,UpperCamelCase )[0]
        snake_case__ :List[Any] = self.layer_norm(UpperCamelCase )
        return self.dropout_post(UpperCamelCase ), encoder_inputs_mask
| 57
| 0
|
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class _snake_case :
    """Builds MaskFormer configs and random inputs, and hosts the shared
    create-and-check routines used by the test suite below.

    NOTE(review): machine-mangled — ``__init__`` parameters are all identical
    (a SyntaxError), assignments reuse one target name, all methods share one
    name (only the last binding survives), and ``__A`` (presumably
    ``torch_device``) is never defined.  Restore real names before use.
    """

    def __init__( self ,UpperCamelCase ,UpperCamelCase=2 ,UpperCamelCase=True ,UpperCamelCase=False ,UpperCamelCase=10 ,UpperCamelCase=3 ,UpperCamelCase=32 * 4 ,UpperCamelCase=32 * 6 ,UpperCamelCase=4 ,UpperCamelCase=32 ,) -> str:
        # Right-hand names below are the intended (currently unbound) params.
        snake_case__ :int = parent
        snake_case__ :Union[str, Any] = batch_size
        snake_case__ :Dict = is_training
        snake_case__ :Optional[Any] = use_auxiliary_loss
        snake_case__ :Union[str, Any] = num_queries
        snake_case__ :Dict = num_channels
        snake_case__ :List[Any] = min_size
        snake_case__ :int = max_size
        snake_case__ :Optional[int] = num_labels
        snake_case__ :Dict = mask_feature_size

    def lowerCAmelCase_ ( self ) -> Optional[int]:
        # Random pixel values, an all-ones pixel mask, and random binary
        # mask-/class-labels with the configured shapes.
        snake_case__ :Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            __A )
        snake_case__ :int = torch.ones([self.batch_size, self.min_size, self.max_size] ,device=__A )
        snake_case__ :Optional[Any] = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] ,device=__A ) > 0.5
        ).float()
        snake_case__ :Any = (torch.rand((self.batch_size, self.num_labels) ,device=__A ) > 0.5).long()
        snake_case__ :List[str] = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def lowerCAmelCase_ ( self ) -> Tuple:
        # Tiny Swin backbone + DETR decoder config sized for fast tests.
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1] ,) ,decoder_config=DetrConfig(
                decoder_ffn_dim=128 ,num_queries=self.num_queries ,decoder_attention_heads=2 ,d_model=self.mask_feature_size ,) ,mask_feature_size=self.mask_feature_size ,fpn_feature_size=self.mask_feature_size ,num_channels=self.num_channels ,num_labels=self.num_labels ,)

    def lowerCAmelCase_ ( self ) -> Tuple:
        # Config + minimal forward kwargs for the common-test mixin.
        snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ :int = self.prepare_config_and_inputs()
        snake_case__ :List[str] = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> int:
        # Hidden-state counts must match backbone depth / decoder layers.
        snake_case__ :Optional[int] = output.encoder_hidden_states
        snake_case__ :Tuple = output.pixel_decoder_hidden_states
        snake_case__ :Any = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(__A ) ,len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(__A ) ,len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(__A ) ,config.decoder_config.decoder_layers )

    def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=False ) -> Any:
        with torch.no_grad():
            snake_case__ :List[str] = MaskFormerModel(config=__A )
            model.to(__A )
            model.eval()
            snake_case__ :Optional[Any] = model(pixel_values=__A ,pixel_mask=__A )
            snake_case__ :Optional[int] = model(__A ,output_hidden_states=__A )
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape ,(self.batch_size, self.num_queries, self.mask_feature_size) ,)
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )
        if output_hidden_states:
            self.check_output_hidden_state(__A ,__A )

    def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> int:
        snake_case__ :List[Any] = MaskFormerForInstanceSegmentation(config=__A )
        model.to(__A )
        model.eval()

        def comm_check_on_output(UpperCamelCase ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape ,(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) ,)
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape ,(self.batch_size, self.num_queries, self.num_labels + 1) )

        with torch.no_grad():
            snake_case__ :Optional[Any] = model(pixel_values=__A ,pixel_mask=__A )
            snake_case__ :List[Any] = model(__A )
        comm_check_on_output(__A )
        # With targets supplied the model must additionally produce a loss.
        snake_case__ :List[str] = model(
            pixel_values=__A ,pixel_mask=__A ,mask_labels=__A ,class_labels=__A )
        comm_check_on_output(__A )
        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape ,torch.Size([1] ) )
@require_torch
class _snake_case ( _A , _A , unittest.TestCase ):
    """Common-API tests for MaskFormer (mirrors transformers' shared
    ModelTesterMixin/PipelineTesterMixin suite).

    NOTE(review): machine-mangled — base mixin names and the six ``_A`` class
    attributes overwrite one another, every test method shares one name, and
    ``__A`` stands in for several distinct values (device, booleans, configs).
    """

    _A = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    _A = (
        {'feature-extraction': MaskFormerModel, 'image-segmentation': MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    _A = False
    _A = False
    _A = False
    _A = False

    def lowerCAmelCase_ ( self ) -> Tuple:
        # Shared model tester + config tester fixtures.
        snake_case__ :Any = MaskFormerModelTester(self )
        snake_case__ :Optional[int] = ConfigTester(self ,config_class=__A ,has_text_modality=__A )

    def lowerCAmelCase_ ( self ) -> Dict:
        self.config_tester.run_common_tests()

    def lowerCAmelCase_ ( self ) -> int:
        snake_case__ , snake_case__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(__A ,**__A ,output_hidden_states=__A )

    def lowerCAmelCase_ ( self ) -> Optional[Any]:
        snake_case__ :str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__A )

    @unittest.skip(reason="MaskFormer does not use inputs_embeds" )
    def lowerCAmelCase_ ( self ) -> Tuple:
        pass

    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" )
    def lowerCAmelCase_ ( self ) -> Any:
        pass

    @unittest.skip(reason="MaskFormer is not a generative model" )
    def lowerCAmelCase_ ( self ) -> Optional[int]:
        pass

    @unittest.skip(reason="MaskFormer does not use token embeddings" )
    def lowerCAmelCase_ ( self ) -> List[str]:
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`" )
    def lowerCAmelCase_ ( self ) -> List[Any]:
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def lowerCAmelCase_ ( self ) -> Any:
        pass

    def lowerCAmelCase_ ( self ) -> Optional[Any]:
        # forward() must take ``pixel_values`` as its first argument.
        snake_case__ , snake_case__ :int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case__ :Any = model_class(__A )
            snake_case__ :int = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            snake_case__ :Optional[Any] = [*signature.parameters.keys()]
            snake_case__ :List[Any] = ["pixel_values"]
            self.assertListEqual(arg_names[:1] ,__A )

    @slow
    def lowerCAmelCase_ ( self ) -> Optional[Any]:
        # Hub checkpoints must load.
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            snake_case__ :Tuple = MaskFormerModel.from_pretrained(__A )
            self.assertIsNotNone(__A )

    def lowerCAmelCase_ ( self ) -> Optional[int]:
        # Loss must be computable from target masks/labels alone.
        snake_case__ :List[str] = (self.model_tester.min_size,) * 2
        snake_case__ :int = {
            "pixel_values": torch.randn((2, 3, *size) ,device=__A ),
            "mask_labels": torch.randn((2, 10, *size) ,device=__A ),
            "class_labels": torch.zeros(2 ,10 ,device=__A ).long(),
        }
        snake_case__ :Tuple = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__A )
        snake_case__ :Any = model(**__A )
        self.assertTrue(outputs.loss is not None )

    def lowerCAmelCase_ ( self ) -> List[Any]:
        snake_case__ , snake_case__ :List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(__A ,**__A ,output_hidden_states=__A )

    def lowerCAmelCase_ ( self ) -> Tuple:
        # Attentions must be returned when requested.
        snake_case__ , snake_case__ :str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case__ :int = model_class(__A ).to(__A )
            snake_case__ :int = model(**__A ,output_attentions=__A )
            self.assertTrue(outputs.attentions is not None )

    def lowerCAmelCase_ ( self ) -> Union[str, Any]:
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        snake_case__ :Optional[int] = self.all_model_classes[1]
        snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
        snake_case__ :Any = model_class(__A )
        model.to(__A )
        model.train()
        snake_case__ :Dict = model(__A ,mask_labels=__A ,class_labels=__A ).loss
        loss.backward()

    def lowerCAmelCase_ ( self ) -> Optional[Any]:
        # only MaskFormerForInstanceSegmentation has the loss
        snake_case__ :Optional[Any] = self.all_model_classes[1]
        snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ :Dict = self.model_tester.prepare_config_and_inputs()
        snake_case__ :Tuple = True
        snake_case__ :str = True
        snake_case__ :int = model_class(__A )
        model.to(__A )
        model.train()
        snake_case__ :Tuple = model(__A ,mask_labels=__A ,class_labels=__A )
        # Gradients must flow back into every intermediate activation.
        snake_case__ :Any = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        snake_case__ :int = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        snake_case__ :Optional[Any] = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        snake_case__ :List[Any] = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=__A )
        self.assertIsNotNone(encoder_hidden_states.grad )
        self.assertIsNotNone(pixel_decoder_hidden_states.grad )
        self.assertIsNotNone(transformer_decoder_hidden_states.grad )
        self.assertIsNotNone(attentions.grad )
# Absolute tolerance shared by the integration tests' allclose checks.
__UpperCAmelCase : float = 1e-4
def lowercase_ ( ) -> Optional[Any]:
    """Load the COCO cats fixture image used by the integration tests."""
    # Return the opened image directly instead of binding it to a temporary.
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
@require_vision
@slow
class _snake_case ( unittest.TestCase ):
    """Slow integration tests: run real MaskFormer checkpoints on fixture
    images and compare small logits slices against recorded values.

    NOTE(review): machine-mangled — all test methods share one name (only the
    last binding survives) and ``__A`` stands in for several distinct values
    (device, tolerance, intermediate tensors) depending on the call site.
    """

    @cached_property
    def lowerCAmelCase_ ( self ) -> Optional[int]:
        # Image processor matching the swin-small COCO checkpoint
        # (None when vision deps are unavailable).
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" )
            if is_vision_available()
            else None
        )

    def lowerCAmelCase_ ( self ) -> Any:
        # Backbone-only checkpoint: check three last-hidden-state slices.
        snake_case__ :int = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(__A )
        snake_case__ :List[str] = self.default_image_processor
        snake_case__ :Dict = prepare_img()
        snake_case__ :List[Any] = image_processor(__A ,return_tensors="pt" ).to(__A )
        snake_case__ :Optional[Any] = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(__A ,(1, 3, 800, 1_088) )
        with torch.no_grad():
            snake_case__ :List[Any] = model(**__A )
        snake_case__ :Tuple = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(__A )
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] ,__A ,atol=__A ) )
        snake_case__ :str = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(__A )
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,__A ,atol=__A ) )
        snake_case__ :Optional[int] = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(__A )
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,__A ,atol=__A ) )

    def lowerCAmelCase_ ( self ) -> Dict:
        # Instance-segmentation head on the swin-small COCO checkpoint.
        snake_case__ :Union[str, Any] = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
            .to(__A )
            .eval()
        )
        snake_case__ :str = self.default_image_processor
        snake_case__ :List[Any] = prepare_img()
        snake_case__ :str = image_processor(__A ,return_tensors="pt" ).to(__A )
        snake_case__ :Optional[int] = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(__A ,(1, 3, 800, 1_088) )
        with torch.no_grad():
            snake_case__ :Union[str, Any] = model(**__A )
        # masks_queries_logits
        snake_case__ :Optional[Any] = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape ,(1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ,)
        snake_case__ :str = [
            [-1.3737124, -1.7724937, -1.9364233],
            [-1.5977281, -1.9867939, -2.1523695],
            [-1.5795398, -1.9269832, -2.093942],
        ]
        snake_case__ :Any = torch.tensor(__A ).to(__A )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,__A ,atol=__A ) )
        # class_queries_logits
        snake_case__ :Optional[int] = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape ,(1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        snake_case__ :List[Any] = torch.tensor(
            [
                [1.6_512E00, -5.2_572E00, -3.3_519E00],
                [3.6_169E-02, -5.9_025E00, -2.9_313E00],
                [1.0_766E-04, -7.7_630E00, -5.1_263E00],
            ] ).to(__A )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,__A ,atol=__A ) )

    def lowerCAmelCase_ ( self ) -> Dict:
        # Instance-segmentation head on the resnet101 COCO-stuff checkpoint.
        snake_case__ :Tuple = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" )
            .to(__A )
            .eval()
        )
        snake_case__ :Union[str, Any] = self.default_image_processor
        snake_case__ :List[str] = prepare_img()
        snake_case__ :Dict = image_processor(__A ,return_tensors="pt" ).to(__A )
        snake_case__ :List[Any] = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(__A ,(1, 3, 800, 1_088) )
        with torch.no_grad():
            snake_case__ :Dict = model(**__A )
        # masks_queries_logits
        snake_case__ :int = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape ,(1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ,)
        snake_case__ :Dict = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
        snake_case__ :int = torch.tensor(__A ).to(__A )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,__A ,atol=__A ) )
        # class_queries_logits
        snake_case__ :int = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape ,(1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        snake_case__ :Any = torch.tensor(
            [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(__A )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,__A ,atol=__A ) )

    def lowerCAmelCase_ ( self ) -> str:
        # Batched inputs with segmentation maps must produce a loss.
        snake_case__ :Optional[int] = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
            .to(__A )
            .eval()
        )
        snake_case__ :Any = self.default_image_processor
        snake_case__ :str = image_processor(
            [np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] ,segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] ,return_tensors="pt" ,)
        snake_case__ :Optional[int] = inputs["pixel_values"].to(__A )
        snake_case__ :List[str] = [el.to(__A ) for el in inputs["mask_labels"]]
        snake_case__ :Tuple = [el.to(__A ) for el in inputs["class_labels"]]
        with torch.no_grad():
            snake_case__ :Dict = model(**__A )
        self.assertTrue(outputs.loss is not None )
| 717
|
# Restored names: the mangled original bound both structures to one reused
# identifier, leaving ``edges``/``vertices`` (read below) undefined.
# Adjacency list of the demo DAG and the vertex ordering used for the sweep.
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]
def lowercase_(start, visited, sort, edges=None, vertices=None):
    """Depth-first topological sort.

    Fixes in this revision: the original signature declared three parameters
    with the same name (a SyntaxError), recursed through an undefined
    ``topological_sort`` name, and read ``edges``/``vertices`` globals that
    were never bound under those names. The graph is now an optional
    parameter defaulting to the module's example graph.

    Args:
        start: vertex to begin the DFS from.
        visited: list of already-visited vertices (mutated in place).
        sort: accumulator for the topological order (mutated in place).
        edges: adjacency mapping vertex -> list of neighbours.
        vertices: full vertex list, used to restart the DFS on vertices
            unreachable from ``start``.

    Returns:
        The vertices in topological order (every vertex appears after all of
        its descendants).
    """
    if edges is None:
        edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
    if vertices is None:
        vertices = ["a", "b", "c", "d", "e"]
    current = start
    # mark the current vertex as visited
    visited.append(current)
    # visit every not-yet-visited neighbour first (post-order emission)
    for neighbor in edges[current]:
        if neighbor not in visited:
            sort = lowercase_(neighbor, visited, sort, edges, vertices)
    # all descendants are emitted, so the current vertex may follow
    sort.append(current)
    # if the graph is disconnected, restart from an unvisited vertex
    if len(visited) != len(vertices):
        for vertex in vertices:
            if vertex not in visited:
                sort = lowercase_(vertex, visited, sort, edges, vertices)
    return sort
if __name__ == "__main__":
    # Demo: topologically sort the module's example graph starting at "a".
    # Fixed: the original called an undefined `topological_sort` and printed
    # an undefined `sort`; the sorter defined above is named `lowercase_`.
    sort = lowercase_("a", [], [])
    print(sort)
| 57
| 0
|
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def lowercase_(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order low-pass biquad (Audio EQ Cookbook).

    Fixes: the original declared three parameters with one name (SyntaxError),
    referenced an undefined `_lowerCamelCase`, and collapsed every coefficient
    onto a single identifier.

    Args:
        frequency: cut-off frequency in Hz.
        samplerate: sampling rate in Hz.
        q_factor: quality factor; 1/sqrt(2) gives a Butterworth response.

    Returns:
        A configured 2nd-order ``IIRFilter``.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def lowercase_(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order high-pass biquad (Audio EQ Cookbook).

    Fixes: duplicate parameter names (SyntaxError), undefined
    `_lowerCamelCase`, and coefficient names collapsed onto one identifier.

    Args:
        frequency: cut-off frequency in Hz.
        samplerate: sampling rate in Hz.
        q_factor: quality factor; 1/sqrt(2) gives a Butterworth response.

    Returns:
        A configured 2nd-order ``IIRFilter``.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def lowercase_(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order band-pass biquad (Audio EQ Cookbook).

    Fixes: duplicate parameter names (SyntaxError), undefined
    `_lowerCamelCase`, and coefficient names collapsed onto one identifier.

    Args:
        frequency: centre frequency in Hz.
        samplerate: sampling rate in Hz.
        q_factor: quality factor controlling bandwidth.

    Returns:
        A configured 2nd-order ``IIRFilter``.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def lowercase_(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order all-pass biquad (Audio EQ Cookbook).

    Fixes: duplicate parameter names (SyntaxError), undefined
    `_lowerCamelCase`, and coefficient names collapsed onto one identifier.
    For an all-pass, the feed-forward coefficients are the reversed
    feed-back coefficients.

    Args:
        frequency: centre frequency in Hz.
        samplerate: sampling rate in Hz.
        q_factor: quality factor.

    Returns:
        A configured 2nd-order ``IIRFilter``.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt
def lowercase_(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order peaking-EQ biquad (Audio EQ Cookbook).

    Fixes: duplicate parameter names (SyntaxError), undefined
    `_lowerCamelCase`, and coefficient names collapsed onto one identifier.

    Args:
        frequency: centre frequency in Hz.
        samplerate: sampling rate in Hz.
        gain_db: peak gain in decibels (negative values cut).
        q_factor: quality factor controlling bandwidth.

    Returns:
        A configured 2nd-order ``IIRFilter``.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    # amplitude ratio corresponding to gain_db
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def lowercase_(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order low-shelf biquad (Audio EQ Cookbook).

    Fixes: duplicate parameter names (SyntaxError), undefined
    `_lowerCamelCase`, and coefficient names collapsed onto one identifier.

    Args:
        frequency: shelf corner frequency in Hz.
        samplerate: sampling rate in Hz.
        gain_db: shelf gain in decibels.
        q_factor: quality factor.

    Returns:
        A configured 2nd-order ``IIRFilter``.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    # shorthand terms from the cookbook: (A±1) ∓ (A∓1)·cos(w0)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)
    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def lowercase_(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order high-shelf biquad (Audio EQ Cookbook).

    Fixes: duplicate parameter names (SyntaxError), undefined
    `_lowerCamelCase`, and coefficient names collapsed onto one identifier.

    Args:
        frequency: shelf corner frequency in Hz.
        samplerate: sampling rate in Hz.
        gain_db: shelf gain in decibels.
        q_factor: quality factor.

    Returns:
        A configured 2nd-order ``IIRFilter``.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    # shorthand terms from the cookbook: (A±1) ∓ (A∓1)·cos(w0)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)
    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
| 718
|
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _snake_case ( unittest.TestCase ):
    """Slow integration tests for the Flax Stable Diffusion ControlNet pipeline.

    NOTE(review): identifiers look machine-mangled — ``UpperCamelCase`` is
    passed where booleans/arrays belong, ``jnp.bfloataa`` is presumably
    ``jnp.bfloat16``, and several locals (``pipe``, ``prompts``,
    ``canny_image``, ``images`` ...) are read under names that are never
    bound. Confirm against the original diffusers test file.
    """
    def lowerCAmelCase_ ( self ) -> int:
        """Free accelerator memory between tests."""
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def lowerCAmelCase_ ( self ) -> str:
        """End-to-end canny-edge ControlNet generation; checks output shape
        and a fixed slice of pixel values against reference numbers."""
        # load ControlNet weights and the SD 1.5 pipeline (from PyTorch ckpt)
        snake_case__ , snake_case__ :Tuple = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny" ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa )
        snake_case__ , snake_case__ :Any = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5" ,controlnet=UpperCamelCase ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa )
        snake_case__ :List[str] = controlnet_params
        snake_case__ :Union[str, Any] = "bird"
        # one sample per accelerator device
        snake_case__ :Optional[int] = jax.device_count()
        snake_case__ :Tuple = pipe.prepare_text_inputs([prompts] * num_samples )
        snake_case__ :Union[str, Any] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" )
        snake_case__ :str = pipe.prepare_image_inputs([canny_image] * num_samples )
        # per-device RNG keys, then shard inputs for pmapped execution
        snake_case__ :List[str] = jax.random.PRNGKey(0 )
        snake_case__ :str = jax.random.split(UpperCamelCase ,jax.device_count() )
        snake_case__ :int = replicate(UpperCamelCase )
        snake_case__ :Any = shard(UpperCamelCase )
        snake_case__ :Any = shard(UpperCamelCase )
        snake_case__ :str = pipe(
            prompt_ids=UpperCamelCase ,image=UpperCamelCase ,params=UpperCamelCase ,prng_seed=UpperCamelCase ,num_inference_steps=50 ,jit=UpperCamelCase ,).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        # flatten the device axis and compare a fixed pixel slice
        snake_case__ :List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        snake_case__ :Any = images[0, 253:256, 253:256, -1]
        snake_case__ :Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        snake_case__ :List[Any] = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078] )
        print(f'output_slice: {output_slice}' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
    def lowerCAmelCase_ ( self ) -> Optional[int]:
        """Same end-to-end check with the openpose ControlNet conditioning."""
        snake_case__ , snake_case__ :List[str] = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose" ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa )
        snake_case__ , snake_case__ :Optional[Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5" ,controlnet=UpperCamelCase ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa )
        snake_case__ :str = controlnet_params
        snake_case__ :int = "Chef in the kitchen"
        snake_case__ :List[Any] = jax.device_count()
        snake_case__ :Dict = pipe.prepare_text_inputs([prompts] * num_samples )
        snake_case__ :Any = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" )
        snake_case__ :Optional[int] = pipe.prepare_image_inputs([pose_image] * num_samples )
        snake_case__ :List[str] = jax.random.PRNGKey(0 )
        snake_case__ :Any = jax.random.split(UpperCamelCase ,jax.device_count() )
        snake_case__ :Dict = replicate(UpperCamelCase )
        snake_case__ :Tuple = shard(UpperCamelCase )
        snake_case__ :Optional[int] = shard(UpperCamelCase )
        snake_case__ :Optional[Any] = pipe(
            prompt_ids=UpperCamelCase ,image=UpperCamelCase ,params=UpperCamelCase ,prng_seed=UpperCamelCase ,num_inference_steps=50 ,jit=UpperCamelCase ,).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        snake_case__ :int = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        snake_case__ :List[str] = images[0, 253:256, 253:256, -1]
        snake_case__ :Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        snake_case__ :List[str] = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]] )
        print(f'output_slice: {output_slice}' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 57
| 0
|
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
# Module-level logger for the PyTorch benchmark arguments below.
__UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
@dataclass
class _snake_case ( _A ):
    """PyTorch-specific benchmark arguments (extends a BenchmarkArguments base).

    NOTE(review): names look machine-mangled — the class attribute list,
    the field defaults and the base class all reuse ``_A``; ``__init__``
    reads ``kwargs``, ``A__`` and ``positive_arg`` that are never bound
    (only ``UpperCamelCase`` is declared). Confirm against the original
    transformers ``PyTorchBenchmarkArguments``.
    """
    # deprecated "no_*" flags that are translated into their positive forms
    _A = [
        '''no_inference''',
        '''no_cuda''',
        '''no_tpu''',
        '''no_speed''',
        '''no_memory''',
        '''no_env_print''',
        '''no_multi_process''',
    ]
    def __init__( self ,**UpperCamelCase ) -> Any:
        """Translate deprecated ``no_<x>`` kwargs into ``<x>`` and pop the
        torch-specific options before delegating to the base dataclass."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                # strip the "no_" prefix and invert the boolean
                snake_case__ :List[Any] = deprecated_arg[3:]
                setattr(self ,A__ ,not kwargs.pop(A__ ) )
                logger.warning(
                    f'{deprecated_arg} is depreciated. Please use --no_{positive_arg} or'
                    f' {positive_arg}={kwargs[positive_arg]}' )
        snake_case__ :Union[str, Any] = kwargs.pop("torchscript" ,self.torchscript )
        snake_case__ :Union[str, Any] = kwargs.pop("torch_xla_tpu_print_metrics" ,self.torch_xla_tpu_print_metrics )
        snake_case__ :Optional[int] = kwargs.pop("fp16_opt_level" ,self.fpaa_opt_level )
        super().__init__(**A__ )
    # dataclass fields; NOTE(review): all bound to the mangled name `_A`
    _A = field(default=_A , metadata={'help': 'Trace the models using torchscript'} )
    _A = field(default=_A , metadata={'help': 'Print Xla/PyTorch tpu metrics'} )
    _A = field(
        default='O1' , metadata={
            'help': (
                'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '
                'See details at https://nvidia.github.io/apex/amp.html'
            )
        } , )
    @cached_property
    def lowerCAmelCase_ ( self ) -> List[str]:
        """Resolve the torch device and GPU count once (CPU, TPU or CUDA)."""
        requires_backends(self ,["torch"] )
        logger.info("PyTorch: setting up devices" )
        if not self.cuda:
            snake_case__ :int = torch.device("cpu" )
            snake_case__ :Dict = 0
        elif is_torch_tpu_available():
            snake_case__ :Union[str, Any] = xm.xla_device()
            snake_case__ :str = 0
        else:
            snake_case__ :Optional[int] = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
            snake_case__ :str = torch.cuda.device_count()
        return device, n_gpu
    @property
    def lowerCAmelCase_ ( self ) -> str:
        """Whether a TPU is available and enabled."""
        return is_torch_tpu_available() and self.tpu
    @property
    def lowerCAmelCase_ ( self ) -> Dict:
        """Index of the current CUDA device."""
        requires_backends(self ,["torch"] )
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()
    @property
    def lowerCAmelCase_ ( self ) -> Optional[Any]:
        """The resolved torch device (first element of the cached setup)."""
        requires_backends(self ,["torch"] )
        return self._setup_devices[0]
    @property
    def lowerCAmelCase_ ( self ) -> Optional[int]:
        """Number of GPUs resolved during setup."""
        requires_backends(self ,["torch"] )
        return self._setup_devices[1]
    @property
    def lowerCAmelCase_ ( self ) -> Any:
        """Whether at least one GPU will be used."""
        return self.n_gpu > 0
| 719
|
def lowercase_(__snake_case: list) -> list:
    """Sort a list of non-negative integers in place using bead sort
    (gravity sort).

    Fixes in this revision: the original validated with
    ``isinstance(__snake_case, __snake_case)`` (always nonsense) and then
    iterated an undefined name ``sequence``.

    Args:
        __snake_case: list of non-negative integers; mutated and returned.

    Raises:
        TypeError: if any element is not a non-negative integer.

    Returns:
        The same list, sorted in ascending order.
    """
    sequence = __snake_case
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    # each pass lets beads "fall" one step; len(sequence) passes suffice
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                # drop the excess beads from the upper rod onto the lower one
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
    # Smoke tests for the bead sort defined above. Fixed: the original
    # asserted against an undefined name `bead_sort`.
    assert lowercase_([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert lowercase_([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 57
| 0
|
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def lowercase_(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    """Tokenize a single text line with padding/truncation to ``max_length``.

    Fixes in this revision: the original declared six parameters under one
    name (a SyntaxError) and referenced an undefined ``lowerCAmelCase__``
    placeholder throughout the body.

    Args:
        tokenizer: a Hugging Face tokenizer instance.
        line: raw text line to encode.
        max_length: maximum sequence length.
        padding_side: "left" or "right"; set on the tokenizer before encoding.
        pad_to_max_length: pad to ``max_length`` when True, no padding otherwise.
        return_tensors: framework for the returned tensors (default "pt").

    Returns:
        The tokenizer's BatchEncoding for the single line.
    """
    # BART-style byte-level BPE needs the prefix-space hint unless the line
    # already starts with one
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def lowercase_(input_ids, pad_token_id, attention_mask=None):
    """Drop columns of a batch that contain only padding.

    Fixes in this revision: the original read the undefined placeholder
    ``lowerCAmelCase__`` instead of its parameters.

    Args:
        input_ids: 2-D LongTensor of shape (batch, seq_len).
        pad_token_id: id that marks padding positions.
        attention_mask: optional mask of the same shape, trimmed in lockstep.

    Returns:
        The trimmed ``input_ids``, or a ``(input_ids, attention_mask)`` tuple
        when a mask is supplied.
    """
    # keep a column iff at least one row holds a non-pad token there
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class _snake_case ( __SCREAMING_SNAKE_CASE ):
    """Line-oriented seq2seq dataset reading ``<type_path>.source`` /
    ``<type_path>.target`` files lazily via ``linecache``.

    NOTE(review): names look machine-mangled — ``__init__`` declares every
    parameter as ``UpperCamelCase`` (a SyntaxError) while the body reads
    ``type_path``, ``max_source_length`` etc., and ``_a`` is used where
    paths/tokenizer classes belong. Confirm against the original
    transformers RAG ``utils_rag.Seq2SeqDataset``.
    """
    def __init__( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase="train" ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase="" ,) -> List[Any]:
        """Record file paths, length limits, tokenizer, prefix and languages."""
        super().__init__()
        snake_case__ :str = Path(_a ).joinpath(type_path + ".source" )
        snake_case__ :Optional[Any] = Path(_a ).joinpath(type_path + ".target" )
        # per-line character lengths, also used to cap the dataset via n_obs
        snake_case__ :str = self.get_char_lens(self.src_file )
        snake_case__ :Any = max_source_length
        snake_case__ :str = max_target_length
        assert min(self.src_lens ) > 0, f'found empty line in {self.src_file}'
        snake_case__ :List[Any] = tokenizer
        snake_case__ :int = prefix
        if n_obs is not None:
            snake_case__ :List[Any] = self.src_lens[:n_obs]
        snake_case__ :int = src_lang
        snake_case__ :Union[str, Any] = tgt_lang
    def __len__( self ) -> Dict:
        """Number of usable examples (source lines, possibly capped)."""
        return len(self.src_lens )
    def __getitem__( self ,UpperCamelCase ) -> List[str]:
        """Encode one (source, target) line pair into model-ready tensors."""
        snake_case__ :int = index + 1 # linecache starts at 1
        snake_case__ :Any = self.prefix + linecache.getline(str(self.src_file ) ,_a ).rstrip("\n" )
        snake_case__ :str = linecache.getline(str(self.tgt_file ) ,_a ).rstrip("\n" )
        assert source_line, f'empty source line for index {index}'
        assert tgt_line, f'empty tgt line for index {index}'
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer ,_a ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        # RAG tokenizers wrap a question encoder + generator pair
        snake_case__ :Optional[int] = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer ,_a ) else self.tokenizer
        )
        snake_case__ :List[str] = self.tokenizer.generator if isinstance(self.tokenizer ,_a ) else self.tokenizer
        snake_case__ :List[Any] = encode_line(_a ,_a ,self.max_source_length ,"right" )
        snake_case__ :Optional[Any] = encode_line(_a ,_a ,self.max_target_length ,"right" )
        snake_case__ :str = source_inputs["input_ids"].squeeze()
        snake_case__ :List[str] = target_inputs["input_ids"].squeeze()
        snake_case__ :int = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
    @staticmethod
    def lowerCAmelCase_ ( UpperCamelCase ) -> int:
        """Character length of every line in the given file."""
        return [len(_a ) for x in Path(_a ).open().readlines()]
    def lowerCAmelCase_ ( self ,UpperCamelCase ) -> Tuple:
        """Collate a list of examples into padded, pad-trimmed batch tensors."""
        snake_case__ :Optional[Any] = torch.stack([x["input_ids"] for x in batch] )
        snake_case__ :Tuple = torch.stack([x["attention_mask"] for x in batch] )
        snake_case__ :Union[str, Any] = torch.stack([x["decoder_input_ids"] for x in batch] )
        # generator/question-encoder pads can differ for RAG tokenizers
        snake_case__ :Dict = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer ,_a )
            else self.tokenizer.pad_token_id
        )
        snake_case__ :int = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer ,_a )
            else self.tokenizer.pad_token_id
        )
        snake_case__ :Tuple = trim_batch(_a ,_a )
        snake_case__ , snake_case__ :Tuple = trim_batch(_a ,_a ,attention_mask=_a )
        snake_case__ :Dict = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
# Module-level logger used by the helper functions below.
__UpperCAmelCase : List[str] = getLogger(__name__)
def lowercase_(__snake_case: List[List]) -> List:
    """Flatten one level of nesting: ``[[a, b], [c]] -> [a, b, c]``.

    Fixes in this revision: the original chained from the undefined
    placeholder ``lowerCAmelCase__`` instead of its parameter, and its
    return annotation said ``int``.
    """
    return list(itertools.chain.from_iterable(__snake_case))
def lowercase_(__snake_case: str) -> None:
    """Write repository metadata (sha, branch, hostname) to
    ``<folder>/git_log.json``.

    Fixes in this revision: the original passed the undefined placeholder
    ``lowerCAmelCase__`` both as the payload and as the folder path.
    NOTE(review): ``get_git_info``/``save_json`` are expected module-level
    helpers, but the mangled definitions above all share one name — confirm
    wiring against the original transformers RAG utils.
    """
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(__snake_case, "git_log.json"))
def lowercase_(content, path, indent=4, **json_dump_kwargs):
    """Serialize ``content`` as JSON to ``path``.

    Fixes in this revision: the original declared three parameters under one
    name (a SyntaxError) and dumped the undefined placeholder
    ``lowerCAmelCase__``.

    Args:
        content: any JSON-serializable object.
        path: destination file path.
        indent: indentation passed to ``json.dump``.
        **json_dump_kwargs: forwarded to ``json.dump``.
    """
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)
def lowercase_(__snake_case):
    """Deserialize and return the JSON content of the file at the given path.

    Fixes in this revision: the original opened and loaded the undefined
    placeholder ``lowerCAmelCase__`` instead of its parameter.
    """
    with open(__snake_case) as f:
        return json.load(f)
def lowercase_():
    """Collect git repository metadata for provenance logging.

    Fixes in this revision: the original passed the undefined placeholder
    ``lowerCAmelCase__`` to ``git.Repo`` and to ``str`` for the repo id.

    Returns:
        Dict with ``repo_id``, ``repo_sha``, ``repo_branch`` and ``hostname``.
    """
    # search upward so this works when invoked from a subdirectory
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos
def lowercase_(f: Callable, x: Iterable) -> List:
    """Eager map: apply ``f`` to every element of ``x`` and return a list.

    Fixes in this revision: the original declared both parameters under one
    name (a SyntaxError) and mapped the undefined placeholder
    ``lowerCAmelCase__``.
    """
    return list(map(f, x))
def lowercase_(obj, path):
    """Pickle ``obj`` to the file at ``path``.

    Fixes in this revision: the original declared both parameters under one
    name (a SyntaxError) and dumped the undefined placeholder
    ``lowerCAmelCase__``.
    """
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def lowercase_(__snake_case):
    """SQuAD-style answer normalization: lowercase, strip punctuation,
    remove articles (a/an/the) and collapse whitespace.

    Fixes in this revision: the inner helpers operated on the undefined
    placeholder ``lowerCAmelCase__``/``text`` instead of their parameters.
    """
    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(__snake_case))))
def lowercase_(prediction, ground_truth):
    """Token-level F1 between a prediction and a reference answer.

    Fixes in this revision: the original declared both parameters under one
    name (a SyntaxError) and fed the undefined placeholder
    ``lowerCAmelCase__`` everywhere.
    NOTE(review): relies on a module-level ``normalize_answer``; the mangled
    definitions above all share one name — confirm wiring.

    Returns:
        F1 score as a float, or 0 when no tokens overlap.
    """
    pred_tokens = normalize_answer(prediction).split()
    gt_tokens = normalize_answer(ground_truth).split()
    # multiset intersection counts shared tokens with multiplicity
    common = Counter(pred_tokens) & Counter(gt_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_tokens)
    recall = 1.0 * num_same / len(gt_tokens)
    return (2 * precision * recall) / (precision + recall)
def lowercase_(prediction, ground_truth):
    """True when the normalized prediction equals the normalized reference.

    Fixes in this revision: the original declared both parameters under one
    name (a SyntaxError) and compared the undefined placeholder
    ``lowerCAmelCase__`` with itself.
    NOTE(review): relies on a module-level ``normalize_answer`` — confirm
    wiring against the original transformers RAG utils.
    """
    return normalize_answer(prediction) == normalize_answer(ground_truth)
def lowercase_(output_lns, reference_lns):
    """Average exact-match score over parallel lists of strings.

    Fixes in this revision: the original declared both parameters under one
    name (a SyntaxError) and measured the undefined placeholder
    ``lowerCAmelCase__``.
    NOTE(review): relies on a module-level ``exact_match_score`` — confirm
    wiring against the original transformers RAG utils.

    Returns:
        ``{"em": mean_exact_match}``.
    """
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def lowercase_(__snake_case: str) -> bool:
    """Return True when the model prefix denotes a RAG model.

    Fixes in this revision: the original tested the undefined name
    ``model_prefix`` instead of its parameter.
    """
    return __snake_case.startswith("rag")
def lowercase_(extra_params, hparams, config):
    """Move ``extra_params`` attributes from ``hparams`` onto ``config``,
    translating names the config knows under an equivalent key.

    Fixes in this revision: the original declared three parameters under one
    name (a SyntaxError), used the undefined placeholder ``lowerCAmelCase__``
    everywhere, and dropped the ``equivalent_param["dropout"]`` assignment.

    Args:
        extra_params: attribute names to migrate.
        hparams: object the attributes are read from (and deleted off).
        config: model config the attributes are written onto.

    Returns:
        The (mutated) ``hparams`` and ``config``.
    """
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                # config accepts neither name: warn and drop the hparam
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 720
|
from __future__ import annotations
def lowercase_(__snake_case: list) -> float:
    """Return the arithmetic mean of a non-empty list of numbers.

    Fixes in this revision: the original emptiness check read the undefined
    name ``nums`` instead of the parameter.

    Raises:
        ValueError: if the list is empty.
    """
    if not __snake_case:
        raise ValueError("List is empty")
    return sum(__snake_case) / len(__snake_case)
if __name__ == "__main__":
    # Run any doctests embedded in this module's docstrings.
    import doctest
    doctest.testmod()
| 57
| 0
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase : int = logging.get_logger(__name__)
def lowercase_(model_name):
    """Build the ``DetaConfig`` (Swin-Large backbone) for a named checkpoint.

    Fixes in this revision: the original read the undefined name
    ``model_name`` (its parameter was mangled) and passed the undefined
    placeholder ``_lowerCamelCase`` as flag values; the config attribute
    assignments had also been collapsed onto one identifier.

    Args:
        model_name: checkpoint name; "o365" selects Objects365 labels,
            anything else COCO detection labels.

    Returns:
        A populated ``DetaConfig``.
    """
    backbone_config = SwinConfig(
        embed_dim=1_92, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), window_size=12, out_features=["stage2", "stage3", "stage4"], )
    config = DetaConfig(
        backbone_config=backbone_config, num_queries=9_00, encoder_ffn_dim=20_48, decoder_ffn_dim=20_48, num_feature_levels=5, assign_first_stage=True, with_box_refine=True, two_stage=True, )
    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 3_66
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"
    config.num_labels = num_labels
    idalabel = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    return config
def lowercase_(__snake_case):
    """Build the (old_key, new_key) pairs mapping original DETA/Swin
    checkpoint tensor names onto the Hugging Face layout.

    Fixes in this revision: the original never bound the names ``config``
    and ``rename_keys`` that its body used (both were mangled away).

    Args:
        __snake_case: a DetaConfig whose ``backbone_config.depths``,
            ``encoder_layers`` and ``decoder_layers`` size the loops.

    Returns:
        List of ``(source_name, target_name)`` tuples.
    """
    config = __snake_case  # the body below refers to the config by name
    rename_keys = []
    # stem
    # fmt: off
    rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight") )
    rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias") )
    rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight") )
    rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias") )
    # stages
    for i in range(len(config.backbone_config.depths ) ):
        for j in range(config.backbone_config.depths[i] ):
            rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm1.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
            rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm1.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
            rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
            rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
            rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
            rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
            rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm2.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
            rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm2.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
            rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
            rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
            rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
            rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
        if i < 3:
            rename_keys.append((F'backbone.0.body.layers.{i}.downsample.reduction.weight', F'model.backbone.model.encoder.layers.{i}.downsample.reduction.weight') )
            rename_keys.append((F'backbone.0.body.layers.{i}.downsample.norm.weight', F'model.backbone.model.encoder.layers.{i}.downsample.norm.weight') )
            rename_keys.append((F'backbone.0.body.layers.{i}.downsample.norm.bias', F'model.backbone.model.encoder.layers.{i}.downsample.norm.bias') )
    rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight") )
    rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias") )
    rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight") )
    rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias") )
    rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight") )
    rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias") )
    # transformer encoder
    for i in range(config.encoder_layers ):
        rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight', F'model.encoder.layers.{i}.self_attn.sampling_offsets.weight') )
        rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias', F'model.encoder.layers.{i}.self_attn.sampling_offsets.bias') )
        rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.attention_weights.weight', F'model.encoder.layers.{i}.self_attn.attention_weights.weight') )
        rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.attention_weights.bias', F'model.encoder.layers.{i}.self_attn.attention_weights.bias') )
        rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.value_proj.weight', F'model.encoder.layers.{i}.self_attn.value_proj.weight') )
        rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.value_proj.bias', F'model.encoder.layers.{i}.self_attn.value_proj.bias') )
        rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.output_proj.weight', F'model.encoder.layers.{i}.self_attn.output_proj.weight') )
        rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.output_proj.bias', F'model.encoder.layers.{i}.self_attn.output_proj.bias') )
        rename_keys.append((F'transformer.encoder.layers.{i}.norm1.weight', F'model.encoder.layers.{i}.self_attn_layer_norm.weight') )
        rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'model.encoder.layers.{i}.self_attn_layer_norm.bias') )
        rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'model.encoder.layers.{i}.fc1.weight') )
        rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'model.encoder.layers.{i}.fc1.bias') )
        rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'model.encoder.layers.{i}.fc2.weight') )
        rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'model.encoder.layers.{i}.fc2.bias') )
        rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'model.encoder.layers.{i}.final_layer_norm.weight') )
        rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'model.encoder.layers.{i}.final_layer_norm.bias') )
    # transformer decoder
    for i in range(config.decoder_layers ):
        rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight', F'model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight') )
        rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias', F'model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias') )
        rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.attention_weights.weight', F'model.decoder.layers.{i}.encoder_attn.attention_weights.weight') )
        rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.attention_weights.bias', F'model.decoder.layers.{i}.encoder_attn.attention_weights.bias') )
        rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.value_proj.weight', F'model.decoder.layers.{i}.encoder_attn.value_proj.weight') )
        rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.value_proj.bias', F'model.decoder.layers.{i}.encoder_attn.value_proj.bias') )
        rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.output_proj.weight', F'model.decoder.layers.{i}.encoder_attn.output_proj.weight') )
        rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.output_proj.bias', F'model.decoder.layers.{i}.encoder_attn.output_proj.bias') )
        rename_keys.append((F'transformer.decoder.layers.{i}.norm1.weight', F'model.decoder.layers.{i}.encoder_attn_layer_norm.weight') )
        rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'model.decoder.layers.{i}.encoder_attn_layer_norm.bias') )
        rename_keys.append((F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'model.decoder.layers.{i}.self_attn.out_proj.weight') )
        rename_keys.append((F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'model.decoder.layers.{i}.self_attn.out_proj.bias') )
        rename_keys.append((F'transformer.decoder.layers.{i}.norm2.weight', F'model.decoder.layers.{i}.self_attn_layer_norm.weight') )
        rename_keys.append((F'transformer.decoder.layers.{i}.norm2.bias', F'model.decoder.layers.{i}.self_attn_layer_norm.bias') )
        rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'model.decoder.layers.{i}.fc1.weight') )
        rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'model.decoder.layers.{i}.fc1.bias') )
        rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'model.decoder.layers.{i}.fc2.weight') )
        rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'model.decoder.layers.{i}.fc2.bias') )
        rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'model.decoder.layers.{i}.final_layer_norm.weight') )
        rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'model.decoder.layers.{i}.final_layer_norm.bias') )
    # fmt: on
    return rename_keys
def rename_key ( dct : dict , old : str , new : str ) -> None:
    """Move the value stored under key ``old`` in ``dct`` to key ``new``.

    Mutates ``dct`` in place (the conversion loop renames the state dict one
    key at a time); nothing is returned. The mangled original repeated the
    parameter name ``__snake_case`` (a SyntaxError) and referenced undefined
    ``_lowerCamelCase``/``val``; call sites use ``rename_key(dct, src, dest)``.
    """
    # Pop first so the old entry disappears even if old == new.
    val = dct.pop(old)
    dct[new] = val
def lowercase_ ( __snake_case : Dict , __snake_case : Union[str, Any] ) -> Optional[int]:
    '''Split each Swin backbone block's fused qkv projection into separate q/k/v tensors.

    NOTE(review): this code is machine-mangled and cannot run as written. The
    parameter list repeats ``__snake_case`` (a SyntaxError) while the body reads
    ``state_dict`` and ``backbone_config`` — presumably the original signature
    was ``(state_dict, backbone_config)``. Every ``snake_case__ = ...`` below
    almost certainly wrote the q/k/v slices back into ``state_dict`` under new
    HF-style keys that were lost by the rewrite; confirm against the upstream
    DETA conversion script before relying on this.
    '''
    # Per-stage feature dims: embed_dim doubles at each Swin stage.
    snake_case__ :str = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        # ``dim`` for this stage (original binding name lost).
        snake_case__ :List[Any] = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            snake_case__ :Optional[Any] = state_dict.pop(F'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight' )
            snake_case__ :str = state_dict.pop(F'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias' )
            # next, add query, keys and values (in that order) to the state dict
            # query = first ``dim`` rows of the fused projection.
            snake_case__ :List[str] = in_proj_weight[:dim, :]
            snake_case__ :Tuple = in_proj_bias[: dim]
            # key = middle ``dim`` rows.
            snake_case__ :Dict = in_proj_weight[
                dim : dim * 2, :
            ]
            snake_case__ :Tuple = in_proj_bias[
                dim : dim * 2
            ]
            # value = last ``dim`` rows.
            snake_case__ :List[Any] = in_proj_weight[
                -dim :, :
            ]
            snake_case__ :Dict = in_proj_bias[-dim :]
            # fmt: on
def lowercase_ ( __snake_case : List[str] , __snake_case : Union[str, Any] ) -> Any:
    '''Split each decoder layer's fused self-attention in_proj into q/k/v tensors.

    NOTE(review): mangled like the Swin variant above — duplicated parameter
    names (SyntaxError) and the body reads ``state_dict``/``config`` instead of
    the declared parameters. The repeated ``snake_case__ = ...`` assignments
    presumably wrote the slices into ``state_dict`` under q_proj/k_proj/v_proj
    keys that were lost; restore from the upstream script before use.
    '''
    snake_case__ :str = config.d_model
    for i in range(config.decoder_layers ):
        # read in weights + bias of input projection layer of self-attention
        snake_case__ :Any = state_dict.pop(F'transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
        snake_case__ :Any = state_dict.pop(F'transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
        # next, add query, keys and values (in that order) to the state dict
        snake_case__ :Any = in_proj_weight[:hidden_size, :]
        snake_case__ :str = in_proj_bias[:hidden_size]
        snake_case__ :Any = in_proj_weight[
            hidden_size : hidden_size * 2, :
        ]
        snake_case__ :Dict = in_proj_bias[hidden_size : hidden_size * 2]
        snake_case__ :Union[str, Any] = in_proj_weight[-hidden_size:, :]
        snake_case__ :Tuple = in_proj_bias[-hidden_size:]
def prepare_img ( ):
    """Download the standard COCO "two cats" test image used to sanity-check conversions.

    Returns:
        A ``PIL.Image`` opened from the streamed HTTP response.

    The mangled original referenced undefined ``_lowerCamelCase``; the caller
    invokes ``prepare_img()``, so that name is restored here.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # stream=True lets PIL read straight from the response without buffering the whole body.
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def lowercase_ ( __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : str ) -> Optional[Any]:
    '''Convert an original DETA-Swin checkpoint to the HuggingFace format and verify it.

    Flow: download the checkpoint from the hub, rename keys, split fused q/k/v
    projections, load into ``DetaForObjectDetection``, verify logits/boxes on the
    COCO cats image, then optionally save locally and push to the hub.

    NOTE(review): mangled — the parameter list repeats ``__snake_case``
    (SyntaxError); argument uses were replaced with ``_lowerCamelCase`` and local
    bindings with ``snake_case__``, though the intended locals (``config``,
    ``state_dict``, ``model``, ``processor``, ...) are still readable on the
    right-hand sides. Presumably the original signature was
    ``(model_name, pytorch_dump_folder_path, push_to_hub)`` — confirm before use.
    '''
    snake_case__ :Union[str, Any] = get_deta_config(_lowerCamelCase )
    # load original state dict
    if model_name == "deta-swin-large":
        snake_case__ :Dict = hf_hub_download(repo_id="nielsr/deta-checkpoints" , filename="adet_swin_ft.pth" )
    elif model_name == "deta-swin-large-o365":
        snake_case__ :Union[str, Any] = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365" , filename="deta_swin_pt_o365.pth" )
    else:
        raise ValueError(F'Model name {model_name} not supported' )
    snake_case__ :str = torch.load(_lowerCamelCase , map_location="cpu" )["model"]
    # original state dict
    for name, param in state_dict.items():
        print(_lowerCamelCase , param.shape )
    # rename keys
    snake_case__ :Tuple = create_rename_keys(_lowerCamelCase )
    for src, dest in rename_keys:
        rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
    read_in_swin_q_k_v(_lowerCamelCase , config.backbone_config )
    read_in_decoder_q_k_v(_lowerCamelCase , _lowerCamelCase )
    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            snake_case__ :Optional[int] = state_dict.pop(_lowerCamelCase )
            snake_case__ :List[Any] = val
        if "input_proj" in key:
            snake_case__ :Union[str, Any] = state_dict.pop(_lowerCamelCase )
            snake_case__ :Optional[int] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            snake_case__ :List[Any] = state_dict.pop(_lowerCamelCase )
            snake_case__ :Tuple = val
    # finally, create HuggingFace model and load state dict
    snake_case__ :Dict = DetaForObjectDetection(_lowerCamelCase )
    model.load_state_dict(_lowerCamelCase )
    model.eval()
    snake_case__ :Union[str, Any] = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(_lowerCamelCase )
    # load image processor
    snake_case__ :Dict = DetaImageProcessor(format="coco_detection" )
    # verify our conversion on image
    snake_case__ :List[str] = prepare_img()
    snake_case__ :List[str] = processor(images=_lowerCamelCase , return_tensors="pt" )
    snake_case__ :Dict = encoding["pixel_values"]
    snake_case__ :str = model(pixel_values.to(_lowerCamelCase ) )
    # verify logits
    print("Logits:" , outputs.logits[0, :3, :3] )
    print("Boxes:" , outputs.pred_boxes[0, :3, :3] )
    if model_name == "deta-swin-large":
        snake_case__ :str = torch.tensor(
            [[-7.6_3_0_8, -2.8_4_8_5, -5.3_7_3_7], [-7.2_0_3_7, -4.5_5_0_5, -4.8_0_2_7], [-7.2_9_4_3, -4.2_6_1_1, -4.6_6_1_7]] )
        snake_case__ :Dict = torch.tensor([[0.4_9_8_7, 0.4_9_6_9, 0.9_9_9_9], [0.2_5_4_9, 0.5_4_9_8, 0.4_8_0_5], [0.5_4_9_8, 0.2_7_5_7, 0.0_5_6_9]] )
    elif model_name == "deta-swin-large-o365":
        snake_case__ :Union[str, Any] = torch.tensor(
            [[-8.0_1_2_2, -3.5_7_2_0, -4.9_7_1_7], [-8.1_5_4_7, -3.6_8_8_6, -4.6_3_8_9], [-7.6_6_1_0, -3.6_1_9_4, -5.0_1_3_4]] )
        snake_case__ :int = torch.tensor([[0.2_5_2_3, 0.5_5_4_9, 0.4_8_8_1], [0.7_7_1_5, 0.4_1_4_9, 0.4_6_0_1], [0.5_5_0_3, 0.2_7_5_3, 0.0_5_7_5]] )
    assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(_lowerCamelCase ) , atol=1e-4 )
    assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(_lowerCamelCase ) , atol=1e-4 )
    print("Everything ok!" )
    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(F'Saving PyTorch model and processor to {pytorch_dump_folder_path}...' )
        Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
        model.save_pretrained(_lowerCamelCase )
        processor.save_pretrained(_lowerCamelCase )
    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub..." )
        model.push_to_hub(F'jozhang97/{model_name}' )
        processor.push_to_hub(F'jozhang97/{model_name}' )
# CLI entry point for the DETA conversion script.
# NOTE(review): the mangling bound the parser to ``__UpperCAmelCase`` while the
# following lines read ``parser``/``args``, and the converter above was renamed
# ``lowercase_`` while this calls ``convert_deta_checkpoint`` — as written the
# block raises NameError; restore consistent names to run it.
if __name__ == "__main__":
    __UpperCAmelCase : List[str] = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        type=str,
        default="deta-swin-large",
        choices=["deta-swin-large", "deta-swin-large-o365"],
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        help="Path to the folder to output PyTorch model.",
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    __UpperCAmelCase : Optional[Any] = parser.parse_args()
    convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 721
|
from __future__ import annotations
import math
def minimax ( depth : int , node_index : int , is_max : bool , scores : list[int] , height : float ) -> int:
    """Return the optimal game value from a perfect binary tree of leaf ``scores``.

    Args:
        depth: current depth in the tree (root is 0).
        node_index: index of the current node within its level.
        is_max: True when the maximizing player moves at this node.
        scores: leaf payoffs; length must be a power of two.
        height: depth of the leaves, i.e. ``math.log2(len(scores))``.

    Raises:
        ValueError: if ``depth`` is negative or ``scores`` is empty.

    The mangled original repeated ``__snake_case`` in the parameter list
    (SyntaxError) while recursing via ``minimax`` with real names, so that
    original name is restored here.
    """
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    # Leaves hold the payoffs directly.
    if depth == height:
        return scores[node_index]
    # Children of node k sit at 2k and 2k+1 on the next level; the turn
    # alternates between maximizer and minimizer.
    left = minimax(depth + 1, node_index * 2, not is_max, scores, height)
    right = minimax(depth + 1, node_index * 2 + 1, not is_max, scores, height)
    return max(left, right) if is_max else min(left, right)
def main ( ) -> None:
    """Demo: run minimax over a fixed score list and print the optimal value.

    The guard below calls ``main()``, so the original (pre-mangled) name is
    restored here. ``height`` is ``log2`` of the leaf count, matching the
    perfect-binary-tree layout ``minimax`` expects.
    """
    scores = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))
# Script entry: run the module doctests, then the demo.
# NOTE(review): this calls ``main()`` but the demo function above was renamed
# ``lowercase_`` by the mangling — restore the name for this to run.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
| 57
| 0
|
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def lowercase_ ( __snake_case : Dict ) -> str:
'''simple docstring'''
if (
(cp >= 0X4_e00 and cp <= 0X9_fff)
or (cp >= 0X3_400 and cp <= 0X4_dbf) #
or (cp >= 0X20_000 and cp <= 0X2a_6df) #
or (cp >= 0X2a_700 and cp <= 0X2b_73f) #
or (cp >= 0X2b_740 and cp <= 0X2b_81f) #
or (cp >= 0X2b_820 and cp <= 0X2c_eaf) #
or (cp >= 0Xf_900 and cp <= 0Xf_aff)
or (cp >= 0X2f_800 and cp <= 0X2f_a1f) #
): #
return True
return False
def is_chinese ( word : str ) -> int:
    """Return 1 when every character of ``word`` is a CJK ideograph, else 0.

    Int flags (not bool) are kept because callers combine the result with
    ``and`` expressions. The mangled original read undefined ``word``; callers
    use ``is_chinese``, so that name is restored.
    """
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word ( tokens : list ) -> list:
    """Collect the multi-character, fully-Chinese words appearing in ``tokens``.

    Single characters are skipped — they are not useful for whole-word masking.
    Returns a deduplicated list (order unspecified, as with the original set).
    """
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol ( bert_tokens : list , chinese_word_set : set ) -> list:
    """Prefix BERT sub-tokens that continue a known Chinese word with "##".

    Walks ``bert_tokens`` left to right; whenever the longest possible span
    starting at the cursor matches a word in ``chinese_word_set``, every token
    of that span except the first is rewritten as ``"##" + token`` (the
    WordPiece continuation marker). Mutates and returns the token list.
    """
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            # Try the longest window first so greedy matching prefers whole words.
            max_try = min(end - start, max_word_len)
            for i in range(max_try, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref ( lines : list , ltp_tokenizer , bert_tokenizer ) -> list:
    """Compute whole-word-masking reference positions for each input line.

    For every line, LTP segments the text into Chinese words while the BERT
    tokenizer produces WordPiece ids; tokens that continue a Chinese word
    (rewritten with a "##" prefix by ``add_sub_symbol``) are recorded by index
    so a masking collator can later mask whole words at once.

    Returns:
        One list of token indices per input line.
    """
    ltp_res = []
    # Batch by 100 lines to keep each LTP pipeline call bounded.
    for i in range(0, len(lines), 1_00):
        res = ltp_tokenizer.pipeline(lines[i : i + 1_00], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 1_00):
        res = bert_tokenizer(lines[i : i + 1_00], add_special_tokens=True, truncation=True, max_length=5_12)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main ( args ) -> None:
    """Read raw lines, compute whole-word-masking refs, and dump them as JSONL.

    Args:
        args: parsed CLI namespace with ``file_name``, ``ltp``, ``bert`` and
            ``save_path`` attributes (see the argparse block below, which calls
            ``main(args)`` — that original name is restored here).
    """
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    # Drop empty and whitespace-only lines (avoids delimiters like '\u2029').
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
# CLI wrapper: parse --file_name/--ltp/--bert/--save_path and run main().
# NOTE(review): the parser and the parsed namespace are bound to the mangled
# name ``__UpperCAmelCase`` while later lines read ``parser``/``args`` —
# restore consistent names for this to run.
if __name__ == "__main__":
    __UpperCAmelCase : Dict = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        required=False,
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp",
        required=False,
        type=str,
        default="./resources/ltp",
        help="resources for LTP tokenizer, usually a path",
    )
    parser.add_argument(
        "--bert",
        required=False,
        type=str,
        default="./resources/robert",
        help="resources for Bert tokenizer",
    )
    parser.add_argument(
        "--save_path",
        required=False,
        type=str,
        default="./resources/ref.txt",
        help="path to save res",
    )
    __UpperCAmelCase : Dict = parser.parse_args()
    main(args)
| 700
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
def squared_euclidean_distance ( a , b ):
    """Pairwise squared Euclidean distances between rows of ``a`` and rows of ``b``.

    Uses the expansion ``||x - y||^2 = ||x||^2 - 2 x.y + ||y||^2`` so the whole
    ``(len(a), len(b))`` matrix comes out of one matmul plus two row-norm sums.
    The mangled original duplicated the parameter name (SyntaxError); the
    caller uses ``squared_euclidean_distance``, so that name is restored.
    """
    b = b.T
    aa = np.sum(np.square(a), axis=1)  # per-row squared norms of a
    bb = np.sum(np.square(b), axis=0)  # per-column squared norms of b.T (= rows of b)
    ab = np.matmul(a, b)
    # Broadcast the two norm vectors against the cross-term matrix.
    d = aa[:, None] - 2 * ab + bb[None, :]
    return d
def color_quantize ( x , clusters ):
    """Map each RGB pixel in ``x`` to the index of its nearest palette entry.

    ``x`` is flattened to ``(n_pixels, 3)`` before the distance computation;
    returns a 1-D array of cluster indices. The mangled original duplicated
    the parameter name; the caller uses ``color_quantize``, restored here.
    """
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
class _snake_case ( _A ):
    '''ImageGPT-style image processor: resize, normalize to [-1, 1], and optionally
    color-quantize pixels against a fixed cluster palette into "input_ids".

    NOTE(review): mangled — every method repeats the parameter name
    ``UpperCamelCase`` (a SyntaxError) while the bodies read the intended names
    (``size``, ``clusters``, ``do_resize``, ``image``, ``images``, ...), and the
    ``snake_case__`` bindings presumably were ``self.<attr>`` assignments in
    ``__init__``. Restore from the upstream processor before use.
    '''
    # Presumably ``model_input_names`` in the original.
    _A = ['pixel_values']
    def __init__( self ,UpperCamelCase = None ,UpperCamelCase = True ,UpperCamelCase = None ,UpperCamelCase = PILImageResampling.BILINEAR ,UpperCamelCase = True ,UpperCamelCase = True ,**UpperCamelCase ,) -> None:
        super().__init__(**UpperCamelCase )
        snake_case__ :List[Any] = size if size is not None else {"height": 256, "width": 256}
        snake_case__ :str = get_size_dict(UpperCamelCase )
        # Palette is stored as a numpy array so distance math below is vectorized.
        snake_case__ :Dict = np.array(UpperCamelCase ) if clusters is not None else None
        snake_case__ :str = do_resize
        snake_case__ :List[str] = size
        snake_case__ :List[Any] = resample
        snake_case__ :Union[str, Any] = do_normalize
        snake_case__ :int = do_color_quantize
    def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = PILImageResampling.BILINEAR ,UpperCamelCase = None ,**UpperCamelCase ,) -> np.ndarray:
        '''Resize an image to the (height, width) given in ``size``.'''
        snake_case__ :List[str] = get_size_dict(UpperCamelCase )
        if "height" not in size or "width" not in size:
            raise ValueError(f'Size dictionary must contain both height and width keys. Got {size.keys()}' )
        return resize(
            UpperCamelCase ,size=(size["height"], size["width"]) ,resample=UpperCamelCase ,data_format=UpperCamelCase ,**UpperCamelCase )
    def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ,) -> np.ndarray:
        '''Scale pixels from [0, 255] to [-1, 1] (divide by 127.5, subtract 1).'''
        snake_case__ :Tuple = rescale(image=UpperCamelCase ,scale=1 / 127.5 ,data_format=UpperCamelCase )
        snake_case__ :List[Any] = image - 1
        return image
    def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = ChannelDimension.FIRST ,**UpperCamelCase ,) -> PIL.Image.Image:
        '''Preprocess a batch: resize, normalize, optionally quantize to cluster ids.'''
        snake_case__ :Optional[int] = do_resize if do_resize is not None else self.do_resize
        snake_case__ :int = size if size is not None else self.size
        snake_case__ :Tuple = get_size_dict(UpperCamelCase )
        snake_case__ :str = resample if resample is not None else self.resample
        snake_case__ :Dict = do_normalize if do_normalize is not None else self.do_normalize
        snake_case__ :Tuple = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        snake_case__ :List[Any] = clusters if clusters is not None else self.clusters
        snake_case__ :str = np.array(UpperCamelCase )
        snake_case__ :int = make_list_of_images(UpperCamelCase )
        if not valid_images(UpperCamelCase ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True." )
        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True." )
        # All transformations expect numpy arrays.
        snake_case__ :Union[str, Any] = [to_numpy_array(UpperCamelCase ) for image in images]
        if do_resize:
            snake_case__ :int = [self.resize(image=UpperCamelCase ,size=UpperCamelCase ,resample=UpperCamelCase ) for image in images]
        if do_normalize:
            snake_case__ :Any = [self.normalize(image=UpperCamelCase ) for image in images]
        if do_color_quantize:
            snake_case__ :Optional[Any] = [to_channel_dimension_format(UpperCamelCase ,ChannelDimension.LAST ) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            snake_case__ :Union[str, Any] = np.array(UpperCamelCase )
            snake_case__ :Optional[int] = color_quantize(UpperCamelCase ,UpperCamelCase ).reshape(images.shape[:-1] )
            # flatten to (batch_size, height*width)
            snake_case__ :List[Any] = images.shape[0]
            snake_case__ :str = images.reshape(UpperCamelCase ,-1 )
            # We need to convert back to a list of images to keep consistent behaviour across processors.
            snake_case__ :Any = list(UpperCamelCase )
        else:
            snake_case__ :List[str] = [to_channel_dimension_format(UpperCamelCase ,UpperCamelCase ) for image in images]
        snake_case__ :List[str] = {"input_ids": images}
        return BatchFeature(data=UpperCamelCase ,tensor_type=UpperCamelCase )
| 57
| 0
|
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
# Checkpoints and fixtures used by the tests below. The mangled original
# rebound ``__UpperCAmelCase`` eight times, losing the names the test class
# actually reads (BERT_BASE_CASED, ARTICLES, MBART_TINY, ...); they are
# restored here from those call sites.
BERT_BASE_CASED = "bert-base-cased"
PEGASUS_XSUM = "google/pegasus-xsum"
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
MARIAN_TINY = "sshleifer/tiny-marian-en-de"
def lowercase_ ( __snake_case : Path , __snake_case : list ) -> Tuple:
'''simple docstring'''
snake_case__ :List[Any] = "\n".join(SCREAMING_SNAKE_CASE_ )
Path(SCREAMING_SNAKE_CASE_ ).open("w" ).writelines(SCREAMING_SNAKE_CASE_ )
def make_test_data_dir ( tmp_dir ):
    """Populate ``tmp_dir`` with {train,val,test}.{source,target} fixture files.

    Sources are filled with ``ARTICLES`` and targets with ``SUMMARIES``; the
    directory path is returned for chaining. Callers invoke this as
    ``make_test_data_dir(tmp_dir=...)``, matching the restored name and
    parameter (the mangled version referenced undefined names).
    """
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir
class _snake_case ( _UpperCAmelCase ):
    '''Tests for SeqaSeqDataset / LegacySeqaSeqDataset plus packing and sampling utilities.

    NOTE(review): mangled — local bindings were replaced with ``snake_case__``
    and most argument uses with ``lowercase__``; the intended locals
    (``tokenizer``, ``train_dataset``, ``dataloader``, ...) are still visible on
    the right-hand sides, but the methods cannot run as written.
    '''
    # Parametrized over several tiny checkpoints: verifies truncation behaviour
    # and (for mBART) the placement of language codes in the batch.
    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ] ,)
    @slow
    def lowerCAmelCase_ ( self ,UpperCamelCase ) -> List[Any]:
        snake_case__ :str = AutoTokenizer.from_pretrained(lowercase__ )
        snake_case__ :Optional[Any] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
        snake_case__ :str = max(len(tokenizer.encode(lowercase__ ) ) for a in ARTICLES )
        snake_case__ :Optional[int] = max(len(tokenizer.encode(lowercase__ ) ) for a in SUMMARIES )
        snake_case__ :Union[str, Any] = 4
        snake_case__ :Dict = 8
        assert max_len_target > max_src_len # Will be truncated
        assert max_len_source > max_src_len # Will be truncated
        snake_case__ :Tuple = "ro_RO", "de_DE" # ignored for all but mbart, but never causes error.
        snake_case__ :str = SeqaSeqDataset(
            lowercase__ ,data_dir=lowercase__ ,type_path="train" ,max_source_length=lowercase__ ,max_target_length=lowercase__ ,src_lang=lowercase__ ,tgt_lang=lowercase__ ,)
        snake_case__ :Optional[Any] = DataLoader(lowercase__ ,batch_size=2 ,collate_fn=train_dataset.collate_fn )
        for batch in dataloader:
            assert isinstance(lowercase__ ,lowercase__ )
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            snake_case__ :str = shift_tokens_right(batch["labels"] ,tokenizer.pad_token_id )
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
            break # No need to test every batch
    # Legacy dataset: checks hard source-length caps and target truncation.
    @parameterized.expand([BART_TINY, BERT_BASE_CASED] )
    def lowerCAmelCase_ ( self ,UpperCamelCase ) -> int:
        snake_case__ :Dict = AutoTokenizer.from_pretrained(lowercase__ )
        snake_case__ :Union[str, Any] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
        snake_case__ :Dict = max(len(tokenizer.encode(lowercase__ ) ) for a in ARTICLES )
        snake_case__ :Any = max(len(tokenizer.encode(lowercase__ ) ) for a in SUMMARIES )
        snake_case__ :Union[str, Any] = 4
        snake_case__ :Any = LegacySeqaSeqDataset(
            lowercase__ ,data_dir=lowercase__ ,type_path="train" ,max_source_length=20 ,max_target_length=lowercase__ ,)
        snake_case__ :Tuple = DataLoader(lowercase__ ,batch_size=2 ,collate_fn=train_dataset.collate_fn )
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target # Truncated
            assert max_len_target > trunc_target # Truncated
            break # No need to test every batch
    # pack_data_dir should merge short examples into fewer, longer ones.
    def lowerCAmelCase_ ( self ) -> int:
        snake_case__ :Optional[Any] = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25" )
        snake_case__ :Dict = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
        snake_case__ :Optional[int] = tmp_dir.joinpath("train.source" ).open().readlines()
        snake_case__ :Optional[int] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
        pack_data_dir(lowercase__ ,lowercase__ ,128 ,lowercase__ )
        snake_case__ :Union[str, Any] = {x.name for x in tmp_dir.iterdir()}
        snake_case__ :List[Any] = {x.name for x in save_dir.iterdir()}
        snake_case__ :str = save_dir.joinpath("train.source" ).open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(lowercase__ ) < len(lowercase__ )
        assert len(lowercase__ ) == 1
        assert len(packed_examples[0] ) == sum(len(lowercase__ ) for x in orig_examples )
        assert orig_paths == new_paths
    # Dynamic batch sampler: variable batch sizes bounded by a token budget.
    @pytest.mark.skipif(not FAIRSEQ_AVAILABLE ,reason="This test requires fairseq" )
    def lowerCAmelCase_ ( self ) -> Any:
        if not FAIRSEQ_AVAILABLE:
            return
        snake_case__ :List[Any] = self._get_dataset(max_len=64 )
        snake_case__ :Optional[Any] = 64
        snake_case__ :Dict = ds.make_dynamic_sampler(lowercase__ ,required_batch_size_multiple=lowercase__ )
        snake_case__ :str = [len(lowercase__ ) for x in batch_sampler]
        assert len(set(lowercase__ ) ) > 1 # it's not dynamic batch size if every batch is the same length
        assert sum(lowercase__ ) == len(lowercase__ ) # no dropped or added examples
        snake_case__ :Optional[int] = DataLoader(lowercase__ ,batch_sampler=lowercase__ ,collate_fn=ds.collate_fn ,num_workers=2 )
        snake_case__ :Any = []
        snake_case__ :List[Any] = []
        for batch in data_loader:
            snake_case__ :List[str] = batch["input_ids"].shape
            snake_case__ :Union[str, Any] = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            snake_case__ :List[str] = np.product(batch["input_ids"].shape )
            num_src_per_batch.append(lowercase__ )
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(lowercase__ )
        assert num_src_per_batch[0] == max(lowercase__ )
        if failures:
            raise AssertionError(f'too many tokens in {len(lowercase__ )} batches' )
    # Sortish sampler should reduce padding vs. the default ordering.
    def lowerCAmelCase_ ( self ) -> List[Any]:
        snake_case__ :int = self._get_dataset(max_len=512 )
        snake_case__ :str = 2
        snake_case__ :Dict = ds.make_sortish_sampler(lowercase__ ,shuffle=lowercase__ )
        snake_case__ :Dict = DataLoader(lowercase__ ,batch_size=lowercase__ ,collate_fn=ds.collate_fn ,num_workers=2 )
        snake_case__ :str = DataLoader(lowercase__ ,batch_size=lowercase__ ,collate_fn=ds.collate_fn ,num_workers=2 ,sampler=lowercase__ )
        snake_case__ :Dict = tokenizer.pad_token_id
        def count_pad_tokens(UpperCamelCase ,UpperCamelCase="input_ids" ):
            return [batch[k].eq(lowercase__ ).sum().item() for batch in data_loader]
        assert sum(count_pad_tokens(lowercase__ ,k="labels" ) ) < sum(count_pad_tokens(lowercase__ ,k="labels" ) )
        assert sum(count_pad_tokens(lowercase__ ) ) < sum(count_pad_tokens(lowercase__ ) )
        assert len(lowercase__ ) == len(lowercase__ )
    # Helper: build a dataset (real WMT data when USE_REAL_DATA is set).
    def lowerCAmelCase_ ( self ,UpperCamelCase=1_000 ,UpperCamelCase=128 ) -> Dict:
        if os.getenv("USE_REAL_DATA" ,lowercase__ ):
            snake_case__ :str = "examples/seq2seq/wmt_en_ro"
            snake_case__ :Union[str, Any] = max_len * 2 * 64
            if not Path(lowercase__ ).joinpath("train.len" ).exists():
                save_len_file(lowercase__ ,lowercase__ )
        else:
            snake_case__ :Optional[Any] = "examples/seq2seq/test_data/wmt_en_ro"
            snake_case__ :Tuple = max_len * 4
            save_len_file(lowercase__ ,lowercase__ )
        snake_case__ :List[str] = AutoTokenizer.from_pretrained(lowercase__ )
        snake_case__ :str = SeqaSeqDataset(
            lowercase__ ,data_dir=lowercase__ ,type_path="train" ,max_source_length=lowercase__ ,max_target_length=lowercase__ ,n_obs=lowercase__ ,)
        return ds, max_tokens, tokenizer
    # Distributed sortish sampler: the two replica shards must not overlap.
    def lowerCAmelCase_ ( self ) -> str:
        snake_case__ :int = self._get_dataset()
        snake_case__ :Union[str, Any] = set(DistributedSortishSampler(lowercase__ ,256 ,num_replicas=2 ,rank=0 ,add_extra_examples=lowercase__ ) )
        snake_case__ :Any = set(DistributedSortishSampler(lowercase__ ,256 ,num_replicas=2 ,rank=1 ,add_extra_examples=lowercase__ ) )
        assert idsa.intersection(lowercase__ ) == set()
    # dataset_kwargs should carry lang codes for mBART, prefix-space for BART.
    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ] ,)
    def lowerCAmelCase_ ( self ,UpperCamelCase ) -> Dict:
        snake_case__ :Any = AutoTokenizer.from_pretrained(lowercase__ ,use_fast=lowercase__ )
        if tok_name == MBART_TINY:
            snake_case__ :Tuple = SeqaSeqDataset(
                lowercase__ ,data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ,type_path="train" ,max_source_length=4 ,max_target_length=8 ,src_lang="EN" ,tgt_lang="FR" ,)
            snake_case__ :str = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            snake_case__ :str = SeqaSeqDataset(
                lowercase__ ,data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ,type_path="train" ,max_source_length=4 ,max_target_length=8 ,)
            snake_case__ :Tuple = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(lowercase__ ) == 1 if tok_name == BART_TINY else len(lowercase__ ) == 0
| 701
|
import pytest
# Name and source code of the dummy dataset-loading script exercised by the
# fixtures below. The mangled original bound both values to ``__UpperCAmelCase``
# while the fixtures read DATASET_LOADING_SCRIPT_NAME / _CODE; restored here.
DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"
DATASET_LOADING_SCRIPT_CODE = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n    def _info(self):\n        features = datasets.Features(\n            {\n                \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n                \"ner_tags\": datasets.Sequence(\n                    datasets.features.ClassLabel(\n                        names=[\n                            \"O\",\n                            \"B-PER\",\n                            \"I-PER\",\n                            \"B-ORG\",\n                            \"I-ORG\",\n                            \"B-LOC\",\n                            \"I-LOC\",\n                        ]\n                    )\n                ),\n                \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n                \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n            }\n        )\n        return datasets.DatasetInfo(features=features)\n\n    def _split_generators(self, dl_manager):\n        dl_path = dl_manager.download(URLS)\n        return [\n            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n        ]\n\n    def _generate_examples(self, filepath):\n        with open(filepath, \"r\", encoding=\"utf-8\") as f:\n            for i, line in enumerate(f):\n                yield i, json.loads(line)\n"
@pytest.fixture
def dataset_loading_script_name() -> str:
    """Fixture: module name of the dummy dataset-loading script.

    Restored fixture name — the mangled version named all three fixtures
    ``lowercase_``, so later ones shadowed earlier ones and the sibling
    fixture below could not request this one by name.
    """
    return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def dataset_loading_script_code() -> str:
    """Fixture: source code of the dummy dataset-loading script.

    Restored fixture name (the mangled ``lowercase_`` collided with the other
    fixtures in this module).
    """
    return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path) -> str:
    """Fixture: write the dummy script to ``tmp_path/datasets/<name>/<name>.py``.

    Returns the script path as a string. The mangled original repeated
    ``__snake_case`` in the parameter list (a SyntaxError) and referenced the
    sibling fixtures by their pre-mangled names, which are restored here.
    """
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    # parents=True so the intermediate "datasets" directory is created too.
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_path)
| 57
| 0
|
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class _snake_case ( _A ):
    '''fsspec filesystem exposing a single compressed file as a one-file archive.

    NOTE(review): mangled — the four ``_A`` class attributes were presumably
    ``root_marker`` / ``protocol`` / ``compression`` / ``extension`` (only the
    last rebinding survives as written), ``__init__`` repeats the parameter
    name ``UpperCamelCase`` (SyntaxError), and the ``snake_case__`` bindings
    were presumably ``self.file`` / ``self.compressed_name`` /
    ``self.uncompressed_name`` / ``self.dir_cache`` — the other methods read
    those attributes. Restore from the upstream implementation before use.
    '''
    _A = ''
    _A = (
        None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    _A = None # compression type in fsspec. ex: "gzip"
    _A = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
    def __init__( self ,UpperCamelCase = "" ,UpperCamelCase = None ,UpperCamelCase = None ,**UpperCamelCase ) -> Optional[int]:
        super().__init__(self ,**UpperCamelCase )
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        snake_case__ :Tuple = fsspec.open(
            UpperCamelCase ,mode="rb" ,protocol=UpperCamelCase ,compression=self.compression ,client_kwargs={
                "requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True, # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs" ,{} ), # To avoid issues if it was already passed.
            } ,**(target_options or {}) ,)
        snake_case__ :Optional[int] = os.path.basename(self.file.path.split("::" )[0] )
        # Strip the compression extension to get the inner file's name.
        snake_case__ :Optional[int] = (
            self.compressed_name[: self.compressed_name.rindex("." )]
            if '.' in self.compressed_name
            else self.compressed_name
        )
        snake_case__ :Optional[int] = None
    @classmethod
    def lowerCAmelCase_ ( cls ,UpperCamelCase ) -> Optional[Any]:
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(UpperCamelCase ).lstrip("/" )
    def lowerCAmelCase_ ( self ) -> List[Any]:
        # Lazily build the one-entry directory listing for the inner file.
        if self.dir_cache is None:
            snake_case__ :List[str] = {**self.file.fs.info(self.file.path ), 'name': self.uncompressed_name}
            snake_case__ :Optional[int] = {f['name']: f}
    def lowerCAmelCase_ ( self ,UpperCamelCase ) -> Union[str, Any]:
        # Read the whole decompressed content.
        return self.file.open().read()
    def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = "rb" ,UpperCamelCase=None ,UpperCamelCase=True ,UpperCamelCase=None ,**UpperCamelCase ,) -> str:
        snake_case__ :List[str] = self._strip_protocol(UpperCamelCase )
        if mode != "rb":
            raise ValueError(f'Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'' )
        return self.file.open()
class _snake_case ( _A ):
    # bz2-compressed single-file filesystem: fsspec protocol name, compression
    # codec, and filename extension to strip (attribute names lost to
    # obfuscation — upstream they are `protocol`, `compression`, `extension`).
    _A = 'bz2'
    _A = 'bz2'
    _A = '.bz2'
class _snake_case ( _A ):
    # gzip-compressed single-file filesystem (see NOTE on the bz2 variant:
    # attribute names were collapsed to `_A` by obfuscation).
    _A = 'gzip'
    _A = 'gzip'
    _A = '.gz'
class _snake_case ( _A ):
    # lz4-compressed single-file filesystem (attribute names collapsed to `_A`
    # by obfuscation; upstream: `protocol`, `compression`, `extension`).
    _A = 'lz4'
    _A = 'lz4'
    _A = '.lz4'
class _snake_case ( _A ):
    # xz-compressed single-file filesystem (attribute names collapsed to `_A`
    # by obfuscation; upstream: `protocol`, `compression`, `extension`).
    _A = 'xz'
    _A = 'xz'
    _A = '.xz'
class _snake_case ( _A ):
    """Read-only filesystem over a single zstandard-compressed file.

    Fixes vs. the previous revision: ``__init__`` declared the same parameter
    name repeatedly (a SyntaxError); the inner wrapper class had been renamed
    so ``WrappedFile(...)`` raised NameError; ``__next__`` had been renamed to
    a non-dunder method; and the patched ``__enter__`` was never installed
    because the assignments went to throwaway locals.
    """

    _A = 'zstd'
    _A = 'zstd'
    _A = '.zst'

    def __init__(self, fo, mode="rb", target_protocol=None, target_options=None, block_size=DEFAULT_BLOCK_SIZE, **kwargs):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            # Delegates everything to the wrapped file object, but is a plain
            # Python object so fsspec can freely reassign its `close` attribute.
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
| 702
|
from ...utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_torch_available,
    is_transformers_available,
)
# The PyTorch ControlNet pipelines need both `transformers` and `torch`; when
# either is missing, fall back to dummy objects that raise a helpful error on
# first use instead of failing at import time.
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .multicontrolnet import MultiControlNetModel
    from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
    from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
# The Flax pipeline only needs `transformers` plus `jax`/`flax`.
if is_transformers_available() and is_flax_available():
    from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 57
| 0
|
'''simple docstring'''
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class _snake_case ( __A , unittest.TestCase ):
    # Test suite for the BERT tokenizers (slow + fast).
    # NOTE(review): automated renaming has damaged this class: every test
    # method is now called `lowerCAmelCase_` (earlier defs are shadowed and
    # never run under unittest discovery), and many locals were renamed to
    # `snake_case__` while later lines still read the original names (e.g.
    # `vocab_tokens`, `tokens`, `expected_results`, `do_lower_case`), so most
    # bodies raise NameError — TODO restore from upstream before trusting it.
    _A = BertTokenizer
    _A = BertTokenizerFast
    _A = True
    _A = True
    _A = filter_non_english
    def lowerCAmelCase_ ( self ) -> int:
        # setUp: write a tiny WordPiece vocab file into the test tmp dir.
        super().setUp()
        snake_case__ :int = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            '[PAD]',
            '[MASK]',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        snake_case__ :Optional[int] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file ,"w" ,encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
    def lowerCAmelCase_ ( self ,UpperCamelCase ) -> str:
        # Sample (input, expected-detokenized-output) pair for common tests.
        snake_case__ :int = 'UNwant\u00E9d,running'
        snake_case__ :Optional[int] = 'unwanted, running'
        return input_text, output_text
    def lowerCAmelCase_ ( self ) -> str:
        # Full slow-tokenizer round trip: tokenize + convert to ids.
        snake_case__ :Optional[Any] = self.tokenizer_class(self.vocab_file )
        snake_case__ :int = tokenizer.tokenize("UNwant\u00E9d,running" )
        self.assertListEqual(UpperCamelCase ,["un", "##want", "##ed", ",", "runn", "##ing"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) ,[9, 6, 7, 12, 10, 11] )
    def lowerCAmelCase_ ( self ) -> Optional[int]:
        # Slow vs. fast tokenizer parity, with and without lower-casing.
        if not self.test_rust_tokenizer:
            return
        snake_case__ :List[str] = self.get_tokenizer()
        snake_case__ :Optional[Any] = self.get_rust_tokenizer()
        snake_case__ :Optional[Any] = 'UNwant\u00E9d,running'
        snake_case__ :str = tokenizer.tokenize(UpperCamelCase )
        snake_case__ :Dict = rust_tokenizer.tokenize(UpperCamelCase )
        self.assertListEqual(UpperCamelCase ,UpperCamelCase )
        snake_case__ :int = tokenizer.encode(UpperCamelCase ,add_special_tokens=UpperCamelCase )
        snake_case__ :Dict = rust_tokenizer.encode(UpperCamelCase ,add_special_tokens=UpperCamelCase )
        self.assertListEqual(UpperCamelCase ,UpperCamelCase )
        snake_case__ :Tuple = self.get_rust_tokenizer()
        snake_case__ :Optional[int] = tokenizer.encode(UpperCamelCase )
        snake_case__ :Any = rust_tokenizer.encode(UpperCamelCase )
        self.assertListEqual(UpperCamelCase ,UpperCamelCase )
        # With lower casing
        snake_case__ :Union[str, Any] = self.get_tokenizer(do_lower_case=UpperCamelCase )
        snake_case__ :Tuple = self.get_rust_tokenizer(do_lower_case=UpperCamelCase )
        snake_case__ :List[Any] = 'UNwant\u00E9d,running'
        snake_case__ :Optional[int] = tokenizer.tokenize(UpperCamelCase )
        snake_case__ :Union[str, Any] = rust_tokenizer.tokenize(UpperCamelCase )
        self.assertListEqual(UpperCamelCase ,UpperCamelCase )
        snake_case__ :Optional[int] = tokenizer.encode(UpperCamelCase ,add_special_tokens=UpperCamelCase )
        snake_case__ :List[Any] = rust_tokenizer.encode(UpperCamelCase ,add_special_tokens=UpperCamelCase )
        self.assertListEqual(UpperCamelCase ,UpperCamelCase )
        snake_case__ :str = self.get_rust_tokenizer()
        snake_case__ :Tuple = tokenizer.encode(UpperCamelCase )
        snake_case__ :Union[str, Any] = rust_tokenizer.encode(UpperCamelCase )
        self.assertListEqual(UpperCamelCase ,UpperCamelCase )
    def lowerCAmelCase_ ( self ) -> Tuple:
        # BasicTokenizer on mixed Chinese/Latin text.
        snake_case__ :Tuple = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) ,["ah", "\u535A", "\u63A8", "zz"] )
    def lowerCAmelCase_ ( self ) -> List[Any]:
        # BasicTokenizer lower-casing behaviour.
        snake_case__ :Any = BasicTokenizer(do_lower_case=UpperCamelCase )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  " ) ,["hello", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) ,["hello"] )
    def lowerCAmelCase_ ( self ) -> Union[str, Any]:
        # Lower-case with accents preserved (strip_accents=False).
        snake_case__ :Optional[Any] = BasicTokenizer(do_lower_case=UpperCamelCase ,strip_accents=UpperCamelCase )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  " ) ,["hällo", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) ,["h\u00E9llo"] )
    def lowerCAmelCase_ ( self ) -> List[Any]:
        # Lower-case with accents stripped (strip_accents=True).
        snake_case__ :Optional[Any] = BasicTokenizer(do_lower_case=UpperCamelCase ,strip_accents=UpperCamelCase )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  " ) ,["hallo", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) ,["hello"] )
    def lowerCAmelCase_ ( self ) -> Optional[Any]:
        # Default accent handling when lower-casing.
        snake_case__ :Optional[int] = BasicTokenizer(do_lower_case=UpperCamelCase )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  " ) ,["hallo", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) ,["hello"] )
    def lowerCAmelCase_ ( self ) -> Optional[int]:
        # No lower-casing: case is preserved.
        snake_case__ :List[Any] = BasicTokenizer(do_lower_case=UpperCamelCase )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  " ) ,["HeLLo", "!", "how", "Are", "yoU", "?"] )
    def lowerCAmelCase_ ( self ) -> List[Any]:
        # No lower-casing, accents preserved.
        snake_case__ :Optional[Any] = BasicTokenizer(do_lower_case=UpperCamelCase ,strip_accents=UpperCamelCase )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  " ) ,["HäLLo", "!", "how", "Are", "yoU", "?"] )
    def lowerCAmelCase_ ( self ) -> Optional[Any]:
        # No lower-casing, accents stripped.
        snake_case__ :Dict = BasicTokenizer(do_lower_case=UpperCamelCase ,strip_accents=UpperCamelCase )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  " ) ,["HaLLo", "!", "how", "Are", "yoU", "?"] )
    def lowerCAmelCase_ ( self ) -> str:
        # never_split keeps special tokens intact.
        snake_case__ :Optional[Any] = BasicTokenizer(do_lower_case=UpperCamelCase ,never_split=["[UNK]"] )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]" ) ,["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
    def lowerCAmelCase_ ( self ) -> Optional[Any]:
        # Punctuation splitting on apostrophes and sentence punctuation.
        snake_case__ :Any = BasicTokenizer()
        snake_case__ :int = 'a\n\'ll !!to?\'d of, can\'t.'
        snake_case__ :Any = ['a', '\'', 'll', '!', '!', 'to', '?', '\'', 'd', 'of', ',', 'can', '\'', 't', '.']
        self.assertListEqual(tokenizer.tokenize(UpperCamelCase ) ,UpperCamelCase )
    def lowerCAmelCase_ ( self ) -> Optional[int]:
        # WordpieceTokenizer: greedy longest-match-first with [UNK] fallback.
        snake_case__ :Optional[Any] = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
        snake_case__ :Optional[Any] = {}
        for i, token in enumerate(UpperCamelCase ):
            snake_case__ :Dict = i
        snake_case__ :Any = WordpieceTokenizer(vocab=UpperCamelCase ,unk_token="[UNK]" )
        self.assertListEqual(tokenizer.tokenize("" ) ,[] )
        self.assertListEqual(tokenizer.tokenize("unwanted running" ) ,["un", "##want", "##ed", "runn", "##ing"] )
        self.assertListEqual(tokenizer.tokenize("unwantedX running" ) ,["[UNK]", "runn", "##ing"] )
    def lowerCAmelCase_ ( self ) -> List[Any]:
        # Character-class helper: whitespace detection (incl. NBSP).
        self.assertTrue(_is_whitespace(" " ) )
        self.assertTrue(_is_whitespace("\t" ) )
        self.assertTrue(_is_whitespace("\r" ) )
        self.assertTrue(_is_whitespace("\n" ) )
        self.assertTrue(_is_whitespace("\u00A0" ) )
        self.assertFalse(_is_whitespace("A" ) )
        self.assertFalse(_is_whitespace("-" ) )
    def lowerCAmelCase_ ( self ) -> Optional[int]:
        # Character-class helper: control-character detection.
        self.assertTrue(_is_control("\u0005" ) )
        self.assertFalse(_is_control("A" ) )
        self.assertFalse(_is_control(" " ) )
        self.assertFalse(_is_control("\t" ) )
        self.assertFalse(_is_control("\r" ) )
    def lowerCAmelCase_ ( self ) -> Union[str, Any]:
        # Character-class helper: punctuation detection.
        self.assertTrue(_is_punctuation("-" ) )
        self.assertTrue(_is_punctuation("$" ) )
        self.assertTrue(_is_punctuation("`" ) )
        self.assertTrue(_is_punctuation("." ) )
        self.assertFalse(_is_punctuation("A" ) )
        self.assertFalse(_is_punctuation(" " ) )
    def lowerCAmelCase_ ( self ) -> Dict:
        # Soft-hyphen handling (tokenizers issue #340).
        snake_case__ :Optional[int] = self.get_tokenizer()
        snake_case__ :Any = self.get_rust_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(UpperCamelCase ) for t in ["Test", "\xad", "test"]] ,[["[UNK]"], [], ["[UNK]"]] )
        self.assertListEqual(
            [rust_tokenizer.tokenize(UpperCamelCase ) for t in ["Test", "\xad", "test"]] ,[["[UNK]"], [], ["[UNK]"]] )
    @slow
    def lowerCAmelCase_ ( self ) -> Optional[Any]:
        # build_inputs_with_special_tokens adds [CLS]/[SEP] correctly.
        snake_case__ :List[str] = self.tokenizer_class.from_pretrained("bert-base-uncased" )
        snake_case__ :Tuple = tokenizer.encode("sequence builders" ,add_special_tokens=UpperCamelCase )
        snake_case__ :int = tokenizer.encode("multi-sequence build" ,add_special_tokens=UpperCamelCase )
        snake_case__ :Tuple = tokenizer.build_inputs_with_special_tokens(UpperCamelCase )
        snake_case__ :Union[str, Any] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase ,UpperCamelCase )
        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_a + [102]
    def lowerCAmelCase_ ( self ) -> Dict:
        # Offset mapping of the fast tokenizer, with/without lower-casing.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                snake_case__ :int = self.rust_tokenizer_class.from_pretrained(UpperCamelCase ,**UpperCamelCase )
                snake_case__ :Optional[int] = f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
                snake_case__ :Union[str, Any] = tokenizer_r.encode_plus(
                    UpperCamelCase ,return_attention_mask=UpperCamelCase ,return_token_type_ids=UpperCamelCase ,return_offsets_mapping=UpperCamelCase ,add_special_tokens=UpperCamelCase ,)
                snake_case__ :List[Any] = tokenizer_r.do_lower_case if hasattr(UpperCamelCase ,"do_lower_case" ) else False
                snake_case__ :List[Any] = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), 'A'),
                        ((1, 2), ','),
                        ((3, 5), 'na'),
                        ((5, 6), '##ï'),
                        ((6, 8), '##ve'),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), 'Allen'),
                        ((21, 23), '##NL'),
                        ((23, 24), '##P'),
                        ((25, 33), 'sentence'),
                        ((33, 34), '.'),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), 'a'),
                        ((1, 2), ','),
                        ((3, 8), 'naive'),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), 'allen'),
                        ((21, 23), '##nl'),
                        ((23, 24), '##p'),
                        ((25, 33), 'sentence'),
                        ((33, 34), '.'),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )
                self.assertEqual(
                    [e[1] for e in expected_results] ,tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
                self.assertEqual([e[0] for e in expected_results] ,tokens["offset_mapping"] )
    def lowerCAmelCase_ ( self ) -> List[Any]:
        # Chinese characters: "##" prefixing depends on tokenize_chinese_chars.
        snake_case__ :Optional[Any] = ['的', '人', '有']
        snake_case__ :int = ''.join(UpperCamelCase )
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                snake_case__ :int = True
                snake_case__ :List[str] = self.tokenizer_class.from_pretrained(UpperCamelCase ,**UpperCamelCase )
                snake_case__ :Tuple = self.rust_tokenizer_class.from_pretrained(UpperCamelCase ,**UpperCamelCase )
                snake_case__ :Any = tokenizer_p.encode(UpperCamelCase ,add_special_tokens=UpperCamelCase )
                snake_case__ :int = tokenizer_r.encode(UpperCamelCase ,add_special_tokens=UpperCamelCase )
                snake_case__ :Optional[Any] = tokenizer_r.convert_ids_to_tokens(UpperCamelCase )
                snake_case__ :Any = tokenizer_p.convert_ids_to_tokens(UpperCamelCase )
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(UpperCamelCase ,UpperCamelCase )
                self.assertListEqual(UpperCamelCase ,UpperCamelCase )
                snake_case__ :str = False
                snake_case__ :Union[str, Any] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase ,**UpperCamelCase )
                snake_case__ :List[Any] = self.tokenizer_class.from_pretrained(UpperCamelCase ,**UpperCamelCase )
                snake_case__ :Union[str, Any] = tokenizer_r.encode(UpperCamelCase ,add_special_tokens=UpperCamelCase )
                snake_case__ :Tuple = tokenizer_p.encode(UpperCamelCase ,add_special_tokens=UpperCamelCase )
                snake_case__ :Any = tokenizer_r.convert_ids_to_tokens(UpperCamelCase )
                snake_case__ :Optional[int] = tokenizer_p.convert_ids_to_tokens(UpperCamelCase )
                # it is expected that only the first Chinese character is not preceded by "##".
                snake_case__ :Dict = [
                    f'##{token}' if idx != 0 else token for idx, token in enumerate(UpperCamelCase )
                ]
                self.assertListEqual(UpperCamelCase ,UpperCamelCase )
                self.assertListEqual(UpperCamelCase ,UpperCamelCase )
| 703
|
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
# `cookiecutter` is an optional dependency; record its availability so the
# command can fail later with an actionable message instead of an ImportError.
# NOTE(review): obfuscation renamed the flag to `__UpperCAmelCase`, while the
# command class below still reads `_has_cookiecutter` — TODO restore the name.
try:
    from cookiecutter.main import cookiecutter
    __UpperCAmelCase : Dict = True
except ImportError:
    __UpperCAmelCase : List[Any] = False
__UpperCAmelCase : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
def lowercase_ ( __snake_case : Namespace ) -> Dict:
    """Factory hooked into argparse's ``set_defaults(func=...)``: build the
    add-new-model command object from the parsed CLI namespace.

    Fixes vs. the previous revision: the body referenced an undefined ``args``
    (the parameter was renamed) and an undefined ``AddNewModelCommand`` (the
    command class below was renamed to ``_snake_case``).
    """
    return _snake_case(__snake_case.testing, __snake_case.testing_file, path=__snake_case.path)
class _snake_case ( _A ):
    # Deprecated `transformers-cli add-new-model` command: instantiates the
    # cookiecutter "adding_a_new_model" template and moves the generated files
    # into the transformers source tree.
    # NOTE(review): obfuscation left this class broken: `__init__` declares the
    # same parameter name three times (a SyntaxError); the staticmethod body
    # reads undefined `parser`/`add_new_model_parser`; `run()` reads undefined
    # `_has_cookiecutter` and many locals whose assignments were renamed to
    # `snake_case__` — TODO restore from upstream before executing.
    @staticmethod
    def lowerCAmelCase_ ( UpperCamelCase ) -> Any:
        # Register the `add-new-model` sub-command and its CLI flags.
        snake_case__ :Dict = parser.add_parser("add-new-model" )
        add_new_model_parser.add_argument("--testing" ,action="store_true" ,help="If in testing mode." )
        add_new_model_parser.add_argument("--testing_file" ,type=UpperCamelCase ,help="Configuration file on which to run." )
        add_new_model_parser.add_argument(
            "--path" ,type=UpperCamelCase ,help="Path to cookiecutter. Should only be used for testing purposes." )
        add_new_model_parser.set_defaults(func=UpperCamelCase )
    def __init__( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=None ,*UpperCamelCase ) -> Any:
        # Stores testing mode, config-file path, and cookiecutter path.
        snake_case__ :Union[str, Any] = testing
        snake_case__ :Union[str, Any] = testing_file
        snake_case__ :List[str] = path
    def lowerCAmelCase_ ( self ) -> List[Any]:
        # Main entry point: run cookiecutter, then distribute generated files.
        warnings.warn(
            "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
            "It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
            "checks, you should use `transformers-cli add-new-model-like` instead." )
        if not _has_cookiecutter:
            raise ImportError(
                "Model creation dependencies are required to use the `add_new_model` command. Install them by running "
                "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n" )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        snake_case__ :Tuple = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(UpperCamelCase ) > 0:
            raise ValueError(
                "Several directories starting with `cookiecutter-template-` in current working directory. "
                "Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
                "change your working directory." )
        snake_case__ :str = (
            Path(UpperCamelCase ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
        )
        snake_case__ :Tuple = path_to_transformer_root / "templates" / "adding_a_new_model"
        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(UpperCamelCase ) )
        else:
            with open(self._testing_file ,"r" ) as configuration_file:
                snake_case__ :str = json.load(UpperCamelCase )
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path ) ,no_input=UpperCamelCase ,extra_context=UpperCamelCase ,)
        snake_case__ :List[Any] = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]
        # Retrieve configuration
        with open(directory + "/configuration.json" ,"r" ) as configuration_file:
            snake_case__ :Dict = json.load(UpperCamelCase )
        snake_case__ :Optional[Any] = configuration["lowercase_modelname"]
        snake_case__ :List[Any] = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f'{directory}/configuration.json' )
        snake_case__ :Any = "PyTorch" in generate_tensorflow_pytorch_and_flax
        snake_case__ :Any = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        snake_case__ :Any = "Flax" in generate_tensorflow_pytorch_and_flax
        snake_case__ :Dict = f'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'
        os.makedirs(UpperCamelCase ,exist_ok=UpperCamelCase )
        os.makedirs(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}' ,exist_ok=UpperCamelCase )
        # Tests require submodules as they have parent imports
        with open(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' ,"w" ):
            pass
        shutil.move(
            f'{directory}/__init__.py' ,f'{model_dir}/__init__.py' ,)
        shutil.move(
            f'{directory}/configuration_{lowercase_model_name}.py' ,f'{model_dir}/configuration_{lowercase_model_name}.py' ,)
        def remove_copy_lines(UpperCamelCase ):
            # Strip "# Copied from transformers." marker lines from a file.
            with open(UpperCamelCase ,"r" ) as f:
                snake_case__ :List[str] = f.readlines()
            with open(UpperCamelCase ,"w" ) as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(UpperCamelCase )
        if output_pytorch:
            if not self._testing:
                remove_copy_lines(f'{directory}/modeling_{lowercase_model_name}.py' )
            shutil.move(
                f'{directory}/modeling_{lowercase_model_name}.py' ,f'{model_dir}/modeling_{lowercase_model_name}.py' ,)
            shutil.move(
                f'{directory}/test_modeling_{lowercase_model_name}.py' ,f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' ,)
        else:
            os.remove(f'{directory}/modeling_{lowercase_model_name}.py' )
            os.remove(f'{directory}/test_modeling_{lowercase_model_name}.py' )
        if output_tensorflow:
            if not self._testing:
                remove_copy_lines(f'{directory}/modeling_tf_{lowercase_model_name}.py' )
            shutil.move(
                f'{directory}/modeling_tf_{lowercase_model_name}.py' ,f'{model_dir}/modeling_tf_{lowercase_model_name}.py' ,)
            shutil.move(
                f'{directory}/test_modeling_tf_{lowercase_model_name}.py' ,f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' ,)
        else:
            os.remove(f'{directory}/modeling_tf_{lowercase_model_name}.py' )
            os.remove(f'{directory}/test_modeling_tf_{lowercase_model_name}.py' )
        if output_flax:
            if not self._testing:
                remove_copy_lines(f'{directory}/modeling_flax_{lowercase_model_name}.py' )
            shutil.move(
                f'{directory}/modeling_flax_{lowercase_model_name}.py' ,f'{model_dir}/modeling_flax_{lowercase_model_name}.py' ,)
            shutil.move(
                f'{directory}/test_modeling_flax_{lowercase_model_name}.py' ,f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' ,)
        else:
            os.remove(f'{directory}/modeling_flax_{lowercase_model_name}.py' )
            os.remove(f'{directory}/test_modeling_flax_{lowercase_model_name}.py' )
        shutil.move(
            f'{directory}/{lowercase_model_name}.md' ,f'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' ,)
        shutil.move(
            f'{directory}/tokenization_{lowercase_model_name}.py' ,f'{model_dir}/tokenization_{lowercase_model_name}.py' ,)
        shutil.move(
            f'{directory}/tokenization_fast_{lowercase_model_name}.py' ,f'{model_dir}/tokenization_{lowercase_model_name}_fast.py' ,)
        from os import fdopen, remove
        from shutil import copymode, move
        from tempfile import mkstemp
        def replace(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ):
            # Splice `lines_to_copy` into `fname` below the anchor line,
            # preserving file permissions (write to a temp file, then move).
            # Create temp file
            snake_case__ , snake_case__ :Optional[Any] = mkstemp()
            snake_case__ :Optional[Any] = False
            with fdopen(UpperCamelCase ,"w" ) as new_file:
                with open(UpperCamelCase ) as old_file:
                    for line in old_file:
                        new_file.write(UpperCamelCase )
                        if line_to_copy_below in line:
                            snake_case__ :Optional[Any] = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(UpperCamelCase )
            if not line_found:
                raise ValueError(f'Line {line_to_copy_below} was not found in file.' )
            # Copy the file permissions from the old file to the new file
            copymode(UpperCamelCase ,UpperCamelCase )
            # Remove original file
            remove(UpperCamelCase )
            # Move new file
            move(UpperCamelCase ,UpperCamelCase )
        def skip_units(UpperCamelCase ):
            # Whether a template section targets a framework we did not generate.
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )
        def replace_in_files(UpperCamelCase ):
            # Parse the to_replace_* script and apply each snippet in place.
            with open(UpperCamelCase ) as datafile:
                snake_case__ :int = []
                snake_case__ :Optional[int] = False
                snake_case__ :List[str] = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        snake_case__ :Optional[Any] = line.split("\"" )[1]
                        snake_case__ :Tuple = skip_units(UpperCamelCase )
                    elif "# Below: " in line and "##" not in line:
                        snake_case__ :Optional[Any] = line.split("\"" )[1]
                        snake_case__ :List[str] = skip_units(UpperCamelCase )
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
                        snake_case__ :Tuple = []
                    elif "# Replace with" in line and "##" not in line:
                        snake_case__ :Optional[Any] = []
                    elif "##" not in line:
                        lines_to_copy.append(UpperCamelCase )
            remove(UpperCamelCase )
        replace_in_files(f'{directory}/to_replace_{lowercase_model_name}.py' )
        os.rmdir(UpperCamelCase )
| 57
| 0
|
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
__UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
# General docstring
__UpperCAmelCase : int = "RegNetConfig"
# Base docstring
__UpperCAmelCase : int = "facebook/regnet-y-040"
__UpperCAmelCase : int = [1, 1_0_8_8, 7, 7]
# Image classification docstring
__UpperCAmelCase : Union[str, Any] = "facebook/regnet-y-040"
__UpperCAmelCase : Any = "tabby, tabby cat"
__UpperCAmelCase : Optional[Any] = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class _snake_case ( tf.keras.layers.Layer ):
    # Conv + BatchNorm + activation building block of TF RegNet.
    # NOTE(review): obfuscation broke this class — `__init__` declares the same
    # parameter name six times (a SyntaxError) and the bodies reference `_A`,
    # which is not the original argument — TODO restore from upstream.
    def __init__( self ,UpperCamelCase ,UpperCamelCase = 3 ,UpperCamelCase = 1 ,UpperCamelCase = 1 ,UpperCamelCase = "relu" ,**UpperCamelCase ,) -> Dict:
        super().__init__(**_A )
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        snake_case__ :str = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
        snake_case__ :Tuple = tf.keras.layers.ConvaD(
            filters=_A ,kernel_size=_A ,strides=_A ,padding="VALID" ,groups=_A ,use_bias=_A ,name="convolution" ,)
        snake_case__ :Optional[int] = tf.keras.layers.BatchNormalization(epsilon=1E-5 ,momentum=0.9 ,name="normalization" )
        snake_case__ :Optional[int] = ACTaFN[activation] if activation is not None else tf.identity
    def lowerCAmelCase_ ( self ,UpperCamelCase ) -> str:
        # Forward: pad -> conv -> batch-norm -> activation.
        snake_case__ :Tuple = self.convolution(self.padding(_A ) )
        snake_case__ :str = self.normalization(_A )
        snake_case__ :Dict = self.activation(_A )
        return hidden_state
class _snake_case ( tf.keras.layers.Layer ):
    # Stem: validates channel count, converts NCHW -> NHWC, applies one conv layer.
    # NOTE(review): method bodies reference `_A` in place of the real arguments
    # (obfuscation artifact) — TODO restore from upstream.
    def __init__( self ,UpperCamelCase ,**UpperCamelCase ) -> Optional[int]:
        super().__init__(**_A )
        snake_case__ :Dict = config.num_channels
        snake_case__ :str = TFRegNetConvLayer(
            out_channels=config.embedding_size ,kernel_size=3 ,stride=2 ,activation=config.hidden_act ,name="embedder" ,)
    def lowerCAmelCase_ ( self ,UpperCamelCase ) -> int:
        snake_case__ :int = shape_list(_A )[1]
        # Channel-count check only runs eagerly (shape is static then).
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        snake_case__ :Tuple = tf.transpose(_A ,perm=(0, 2, 3, 1) )
        snake_case__ :str = self.embedder(_A )
        return hidden_state
class _snake_case ( tf.keras.layers.Layer ):
    # 1x1 conv + batch-norm shortcut used to match channels/stride on residuals.
    # NOTE(review): `_A` references are obfuscation artifacts — TODO restore.
    def __init__( self ,UpperCamelCase ,UpperCamelCase = 2 ,**UpperCamelCase ) -> Dict:
        super().__init__(**_A )
        snake_case__ :List[Any] = tf.keras.layers.ConvaD(
            filters=_A ,kernel_size=1 ,strides=_A ,use_bias=_A ,name="convolution" )
        snake_case__ :int = tf.keras.layers.BatchNormalization(epsilon=1E-5 ,momentum=0.9 ,name="normalization" )
    def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = False ) -> str:
        return self.normalization(self.convolution(_A ) ,training=_A )
class _snake_case ( tf.keras.layers.Layer ):
    # Squeeze-and-excitation block: global pool -> bottleneck convs -> channel scaling.
    # NOTE(review): `_A` references are obfuscation artifacts — TODO restore.
    def __init__( self ,UpperCamelCase ,UpperCamelCase ,**UpperCamelCase ) -> Tuple:
        super().__init__(**_A )
        snake_case__ :Optional[int] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_A ,name="pooler" )
        snake_case__ :str = [
            tf.keras.layers.ConvaD(filters=_A ,kernel_size=1 ,activation="relu" ,name="attention.0" ),
            tf.keras.layers.ConvaD(filters=_A ,kernel_size=1 ,activation="sigmoid" ,name="attention.2" ),
        ]
    def lowerCAmelCase_ ( self ,UpperCamelCase ) -> Any:
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        snake_case__ :int = self.pooler(_A )
        for layer_module in self.attention:
            snake_case__ :Tuple = layer_module(_A )
        snake_case__ :int = hidden_state * pooled
        return hidden_state
class _snake_case ( tf.keras.layers.Layer ):
    # RegNet "X" residual block: 1x1 reduce -> 3x3 grouped conv -> 1x1 expand,
    # with a (possibly projecting) shortcut.
    # NOTE(review): `_A` references are obfuscation artifacts — TODO restore.
    def __init__( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = 1 ,**UpperCamelCase ) -> List[str]:
        super().__init__(**_A )
        snake_case__ :int = in_channels != out_channels or stride != 1
        snake_case__ :Union[str, Any] = max(1 ,out_channels // config.groups_width )
        snake_case__ :List[Any] = (
            TFRegNetShortCut(_A ,stride=_A ,name="shortcut" )
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear" ,name="shortcut" )
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        snake_case__ :Tuple = [
            TFRegNetConvLayer(_A ,kernel_size=1 ,activation=config.hidden_act ,name="layer.0" ),
            TFRegNetConvLayer(
                _A ,stride=_A ,groups=_A ,activation=config.hidden_act ,name="layer.1" ),
            TFRegNetConvLayer(_A ,kernel_size=1 ,activation=_A ,name="layer.2" ),
        ]
        snake_case__ :Tuple = ACTaFN[config.hidden_act]
    def lowerCAmelCase_ ( self ,UpperCamelCase ) -> Optional[Any]:
        # Residual forward: main branch, projected shortcut, add, activate.
        snake_case__ :List[str] = hidden_state
        for layer_module in self.layers:
            snake_case__ :Tuple = layer_module(_A )
        snake_case__ :str = self.shortcut(_A )
        hidden_state += residual
        snake_case__ :List[str] = self.activation(_A )
        return hidden_state
class _snake_case ( tf.keras.layers.Layer ):
    # RegNet "Y" residual block: same as the X block plus a squeeze-and-excitation
    # layer before the final 1x1 conv.
    # NOTE(review): `_A` references are obfuscation artifacts — TODO restore.
    def __init__( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = 1 ,**UpperCamelCase ) -> Optional[Any]:
        super().__init__(**_A )
        snake_case__ :int = in_channels != out_channels or stride != 1
        snake_case__ :str = max(1 ,out_channels // config.groups_width )
        snake_case__ :Union[str, Any] = (
            TFRegNetShortCut(_A ,stride=_A ,name="shortcut" )
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear" ,name="shortcut" )
        )
        snake_case__ :int = [
            TFRegNetConvLayer(_A ,kernel_size=1 ,activation=config.hidden_act ,name="layer.0" ),
            TFRegNetConvLayer(
                _A ,stride=_A ,groups=_A ,activation=config.hidden_act ,name="layer.1" ),
            TFRegNetSELayer(_A ,reduced_channels=int(round(in_channels / 4 ) ) ,name="layer.2" ),
            TFRegNetConvLayer(_A ,kernel_size=1 ,activation=_A ,name="layer.3" ),
        ]
        snake_case__ :Tuple = ACTaFN[config.hidden_act]
    def lowerCAmelCase_ ( self ,UpperCamelCase ) -> Union[str, Any]:
        # Residual forward: main branch, projected shortcut, add, activate.
        snake_case__ :Optional[int] = hidden_state
        for layer_module in self.layers:
            snake_case__ :int = layer_module(_A )
        snake_case__ :List[Any] = self.shortcut(_A )
        hidden_state += residual
        snake_case__ :List[Any] = self.activation(_A )
        return hidden_state
class _snake_case ( tf.keras.layers.Layer ):
    # One RegNet stage: `depth` residual blocks; the first block downsamples.
    # NOTE(review): `_A` references are obfuscation artifacts — TODO restore.
    def __init__( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = 2 ,UpperCamelCase = 2 ,**UpperCamelCase ) -> Union[str, Any]:
        super().__init__(**_A )
        # Block type ("x" or "y") is chosen from the model config.
        snake_case__ :Optional[Any] = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        snake_case__ :Optional[int] = [
            # downsampling is done in the first layer with stride of 2
            layer(_A ,_A ,_A ,stride=_A ,name="layers.0" ),
            *[layer(_A ,_A ,_A ,name=f'layers.{i+1}' ) for i in range(depth - 1 )],
        ]
    def lowerCAmelCase_ ( self ,UpperCamelCase ) -> Optional[Any]:
        for layer_module in self.layers:
            snake_case__ :Dict = layer_module(_A )
        return hidden_state
class _snake_case ( tf.keras.layers.Layer ):
    # Full RegNet encoder: a sequence of stages, optionally collecting hidden states.
    # NOTE(review): `_A` references are obfuscation artifacts — TODO restore.
    def __init__( self ,UpperCamelCase ,**UpperCamelCase ) -> List[str]:
        super().__init__(**_A )
        snake_case__ :Optional[Any] = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                _A ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,name="stages.0" ,) )
        snake_case__ :Optional[Any] = zip(config.hidden_sizes ,config.hidden_sizes[1:] )
        for i, ((in_channels, out_channels), depth) in enumerate(zip(_A ,config.depths[1:] ) ):
            self.stages.append(TFRegNetStage(_A ,_A ,_A ,depth=_A ,name=f'stages.{i+1}' ) )
    def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = False ,UpperCamelCase = True ) -> Optional[int]:
        # Collect the input and each stage's output when output_hidden_states is set.
        snake_case__ :str = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                snake_case__ :str = hidden_states + (hidden_state,)
            snake_case__ :Optional[int] = stage_module(_A )
        if output_hidden_states:
            snake_case__ :int = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None )
        return TFBaseModelOutputWithNoAttention(last_hidden_state=_A ,hidden_states=_A )
@keras_serializable
class _snake_case ( tf.keras.layers.Layer ):
    # Keras-serializable main layer: embedder -> encoder -> global average pool.
    # Outputs are transposed back to NCHW for parity with the PyTorch model.
    # NOTE(review): `_A` references are obfuscation artifacts — TODO restore.
    _A = RegNetConfig
    def __init__( self ,UpperCamelCase ,**UpperCamelCase ) -> List[str]:
        super().__init__(**_A )
        snake_case__ :Optional[Any] = config
        snake_case__ :List[str] = TFRegNetEmbeddings(_A ,name="embedder" )
        snake_case__ :List[Any] = TFRegNetEncoder(_A ,name="encoder" )
        snake_case__ :Optional[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_A ,name="pooler" )
    @unpack_inputs
    def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = False ,) -> Tuple:
        # Resolve per-call flags against config defaults.
        snake_case__ :str = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        snake_case__ :Tuple = return_dict if return_dict is not None else self.config.use_return_dict
        snake_case__ :str = self.embedder(_A ,training=_A )
        snake_case__ :Dict = self.encoder(
            _A ,output_hidden_states=_A ,return_dict=_A ,training=_A )
        snake_case__ :str = encoder_outputs[0]
        snake_case__ :Union[str, Any] = self.pooler(_A )
        # Change to NCHW output format have uniformity in the modules
        snake_case__ :List[Any] = tf.transpose(_A ,perm=(0, 3, 1, 2) )
        snake_case__ :Tuple = tf.transpose(_A ,perm=(0, 3, 1, 2) )
        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            snake_case__ :Union[str, Any] = tuple([tf.transpose(_A ,perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=_A ,pooler_output=_A ,hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states ,)
class _snake_case ( __lowercase ):
    """Pretrained-model shim exposing the config class, model prefix and input signature.

    NOTE(review): the three `_A = ...` assignments overwrite each other — the
    original attribute names were lost by the obfuscation; left as found.
    """
    _A = RegNetConfig
    _A = 'regnet'
    _A = 'pixel_values'

    @property
    def lowerCAmelCase_ ( self ) -> Tuple:
        # BUG fix: `tf.floataa` is a digit-mangled `tf.float32`.
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) ,dtype=tf.float32 )}
__UpperCAmelCase : int = R"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n"
__UpperCAmelCase : List[Any] = R"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    'The bare RegNet model outputting raw features without any specific head on top.' , __lowercase , )
class _snake_case ( __lowercase ):
    """Bare RegNet model wrapping the main layer.

    BUG fix: all parameters were named `UpperCamelCase` (SyntaxError) and the
    bodies referenced an undefined `_A`; restored to working names.
    """

    def __init__( self ,config ,*inputs ,**kwargs ) -> List[str]:
        super().__init__(config ,*inputs ,**kwargs )
        self.regnet = TFRegNetMainLayer(config ,name="regnet" )

    @unpack_inputs
    @add_start_docstrings_to_model_forward(_A )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC ,output_type=_A ,config_class=_CONFIG_FOR_DOC ,modality="vision" ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
    def lowerCAmelCase_ ( self ,pixel_values ,output_hidden_states = None ,return_dict = None ,training=False ,) -> Dict:
        """Forward pass delegating to the main layer; see the inputs docstring above."""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values=pixel_values ,output_hidden_states=output_hidden_states ,return_dict=return_dict ,training=training ,)
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state ,pooler_output=outputs.pooler_output ,hidden_states=outputs.hidden_states ,)
@add_start_docstrings(
    '\n    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ' , __lowercase , )
class _snake_case ( __lowercase , __lowercase ):
    """RegNet with a linear image-classification head on the pooled features.

    BUG fix: duplicate `UpperCamelCase` parameter names (SyntaxError) and
    undefined `_A` references restored to working names.
    NOTE(review): the base list repeats `__lowercase` — the two original base
    classes were mangled to the same placeholder; left as found.
    """

    def __init__( self ,config ,*inputs ,**kwargs ) -> Any:
        super().__init__(config ,*inputs ,**kwargs )
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config ,name="regnet" )
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels ,name="classifier.1" ) if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(_A )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=_A ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
    def lowerCAmelCase_ ( self ,pixel_values = None ,labels = None ,output_hidden_states = None ,return_dict = None ,training=False ,) -> Dict:
        """Forward pass; computes a classification loss when `labels` is given."""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values ,output_hidden_states=output_hidden_states ,return_dict=return_dict ,training=training )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        flattened_output = self.classifier[0](pooled_output )
        logits = self.classifier[1](flattened_output )
        loss = None if labels is None else self.hf_compute_loss(labels=labels ,logits=logits )
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=loss ,logits=logits ,hidden_states=outputs.hidden_states )
| 704
|
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
__UpperCAmelCase : str = logging.get_logger(__name__)
# File names expected inside a saved tokenizer directory.
__UpperCAmelCase : List[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# Remote locations of the pretrained vocab/merges for `allegro/herbert-base-cased`.
__UpperCAmelCase : List[Any] = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}
# Maximum model input length (in tokens) per checkpoint.
__UpperCAmelCase : str = {"allegro/herbert-base-cased": 5_1_4}
__UpperCAmelCase : List[str] = {}
class _snake_case ( _A ):
    """Fast HerBERT tokenizer: special-token assembly/masking and vocabulary saving.

    BUG fix: every method repeated the parameter name `UpperCamelCase`
    (SyntaxError); parameters restored to descriptive names.
    NOTE(review): all four methods share the mangled name `lowerCAmelCase_`, so
    later definitions shadow earlier ones — name restoration would need the
    original file; left as found.
    """
    _A = VOCAB_FILES_NAMES
    _A = PRETRAINED_VOCAB_FILES_MAP
    _A = PRETRAINED_INIT_CONFIGURATION
    _A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _A = HerbertTokenizer

    def __init__( self ,vocab_file=None ,merges_file=None ,tokenizer_file=None ,cls_token="<s>" ,unk_token="<unk>" ,pad_token="<pad>" ,mask_token="<mask>" ,sep_token="</s>" ,**kwargs ,) -> Dict:
        super().__init__(
            vocab_file ,merges_file ,tokenizer_file=tokenizer_file ,cls_token=cls_token ,unk_token=unk_token ,pad_token=pad_token ,mask_token=mask_token ,sep_token=sep_token ,**kwargs ,)

    def lowerCAmelCase_ ( self ,token_ids_a ,token_ids_b = None ) -> List[int]:
        """Build model inputs: `<cls> A <sep>` or `<cls> A <sep> B <sep>`."""
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_b is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_b + sep

    def lowerCAmelCase_ ( self ,token_ids_a ,token_ids_b = None ,already_has_special_tokens = False ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=token_ids_a ,token_ids_b=token_ids_b ,already_has_special_tokens=already_has_special_tokens )
        if token_ids_b is None:
            return [1] + ([0] * len(token_ids_a )) + [1]
        return [1] + ([0] * len(token_ids_a )) + [1] + ([0] * len(token_ids_b )) + [1]

    def lowerCAmelCase_ ( self ,token_ids_a ,token_ids_b = None ) -> List[int]:
        """Return token-type ids: 0 for the first sequence (+specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]

    def lowerCAmelCase_ ( self ,save_directory ,filename_prefix = None ) -> Tuple[str]:
        """Save the underlying tokenizer model files and return their paths."""
        files = self._tokenizer.model.save(save_directory ,name=filename_prefix )
        return tuple(files )
| 57
| 0
|
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def lowercase_ ( args ) -> int:
    """Convert a fine-pruned checkpoint into a standalone pruned `pytorch_model.bin`.

    BUG fix: the parameter was named `__snake_case` while the body read `args`,
    and every other local was the undefined placeholder `lowerCAmelCase__`;
    locals restored to consistent names.

    `args` must carry: pruning_method, threshold, model_name_or_path,
    target_model_path (see the argparse block at the bottom of the file).
    """
    pruning_method = args.pruning_method
    threshold = args.threshold
    model_name_or_path = args.model_name_or_path.rstrip("/" )
    target_model_path = args.target_model_path
    print(F'Load fine-pruned model from {model_name_or_path}' )
    model = torch.load(os.path.join(model_name_or_path , "pytorch_model.bin" ) )
    pruned_model = {}
    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            # These modules are never pruned; copy through unchanged.
            pruned_model[name] = tensor
            print(F'Copied layer {name}' )
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(F'Copied layer {name}' )
        elif "bias" in name:
            pruned_model[name] = tensor
            print(F'Copied layer {name}' )
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor , threshold=threshold )
                pruned_model[name] = tensor * mask
                print(F'Pruned layer {name}' )
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                # Strip the trailing "weight" to locate the matching mask scores.
                prefix_ = name[:-6]
                scores = model[F'{prefix_}mask_scores']
                mask = TopKBinarizer.apply(scores , threshold )
                pruned_model[name] = tensor * mask
                print(F'Pruned layer {name}' )
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[F'{prefix_}mask_scores']
                mask = ThresholdBinarizer.apply(scores , threshold , True )
                pruned_model[name] = tensor * mask
                print(F'Pruned layer {name}' )
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[F'{prefix_}mask_scores']
                # Hard-concrete stretch interval used during L0 training.
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores )
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0 , max=1.0 )
                pruned_model[name] = tensor * mask
                print(F'Pruned layer {name}' )
            else:
                raise ValueError("Unknown pruning method" )
    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path ) , F'bertarized_{os.path.basename(model_name_or_path )}' )
    if not os.path.isdir(target_model_path ):
        shutil.copytree(model_name_or_path , target_model_path )
        print(F'\nCreated folder {target_model_path}' )
    torch.save(pruned_model , os.path.join(target_model_path , "pytorch_model.bin" ) )
    print("\nPruned model saved! See you later!" )
if __name__ == "__main__":
    # BUG fix: the parser/args were assigned to the throwaway `__UpperCAmelCase`
    # names while the code below read `parser`/`args`, and the entry point was
    # called as the nonexistent `main`; all three restored.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pruning_method",
        choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
        type=str,
        required=True,
        help=(
            "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
            " sigmoied_threshold = Soft movement pruning)"
        ),
    )
    parser.add_argument(
        "--threshold",
        type=float,
        required=False,
        help=(
            "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
            "For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
            "Not needed for `l0`"
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        required=True,
        help="Folder containing the model that was previously fine-pruned",
    )
    parser.add_argument(
        "--target_model_path",
        default=None,
        type=str,
        required=False,
        help="Folder containing the model that was previously fine-pruned",
    )
    args = parser.parse_args()
    lowercase_(args)
| 705
|
def lowercase_ ( p: int ) -> bool:
    """Lucas–Lehmer primality test for the Mersenne number 2**p - 1.

    BUG fix: the parameter was named `__snake_case` while the body read `p`
    (NameError); the parameter is restored to `p`.

    Returns True iff 2**p - 1 is prime (assumes `p` itself is prime for a
    meaningful result; p == 2 is handled as a special case).
    Raises ValueError for p < 2.
    """
    if p < 2:
        raise ValueError("p should not be less than 2!" )
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    # Lucas–Lehmer recurrence: s <- s^2 - 2 (mod 2^p - 1), p - 2 times.
    for _ in range(p - 2 ):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
    # BUG fix: the function was obfuscated to `lowercase_`, so the original
    # `lucas_lehmer_test` name no longer exists in this module.
    print(lowercase_(7))
    print(lowercase_(1_1))
| 57
| 0
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _snake_case ( __lowerCamelCase , unittest.TestCase ):
    """Fast (dummy-sized) tests for `AudioLDMPipeline`.

    NOTE(review): identifiers here are machine-mangled — `a_` is an undefined
    placeholder and some signatures repeat the parameter name `UpperCamelCase`
    (a SyntaxError). The code is preserved byte-for-byte; restoring it needs
    the original diffusers test file.
    """
    # Pipeline class under test and its parameter sets.
    _A = AudioLDMPipeline
    _A = TEXT_TO_AUDIO_PARAMS
    _A = TEXT_TO_AUDIO_BATCH_PARAMS
    _A = frozenset(
        [
            'num_inference_steps',
            'num_waveforms_per_prompt',
            'generator',
            'latents',
            'output_type',
            'return_dict',
            'callback',
            'callback_steps',
        ] )
    # Build tiny UNet/scheduler/VAE/text-encoder/vocoder components for fast tests.
    def lowerCAmelCase_ ( self ) -> Dict:
        torch.manual_seed(0 )
        snake_case__ :str = UNetaDConditionModel(
            block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") ,up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") ,cross_attention_dim=(32, 64) ,class_embed_type="simple_projection" ,projection_class_embeddings_input_dim=32 ,class_embeddings_concat=a_ ,)
        snake_case__ :Optional[Any] = DDIMScheduler(
            beta_start=0.00085 ,beta_end=0.012 ,beta_schedule="scaled_linear" ,clip_sample=a_ ,set_alpha_to_one=a_ ,)
        torch.manual_seed(0 )
        snake_case__ :str = AutoencoderKL(
            block_out_channels=[32, 64] ,in_channels=1 ,out_channels=1 ,down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] ,up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] ,latent_channels=4 ,)
        torch.manual_seed(0 )
        snake_case__ :Dict = ClapTextConfig(
            bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,projection_dim=32 ,)
        snake_case__ :str = ClapTextModelWithProjection(a_ )
        snake_case__ :Any = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta" ,model_max_length=77 )
        snake_case__ :Optional[Any] = SpeechTaHifiGanConfig(
            model_in_dim=8 ,sampling_rate=16_000 ,upsample_initial_channel=16 ,upsample_rates=[2, 2] ,upsample_kernel_sizes=[4, 4] ,resblock_kernel_sizes=[3, 7] ,resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] ,normalize_before=a_ ,)
        snake_case__ :str = SpeechTaHifiGan(a_ )
        snake_case__ :Tuple = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components
    # Standard pipeline call kwargs with a seeded generator.
    def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase=0 ) -> int:
        if str(a_ ).startswith("mps" ):
            snake_case__ :str = torch.manual_seed(a_ )
        else:
            snake_case__ :List[Any] = torch.Generator(device=a_ ).manual_seed(a_ )
        snake_case__ :int = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs
    # Smoke test: DDIM run produces a fixed-length waveform with known prefix.
    def lowerCAmelCase_ ( self ) -> Optional[Any]:
        snake_case__ :Optional[int] = "cpu"  # ensure determinism for the device-dependent torch.Generator
        snake_case__ :List[Any] = self.get_dummy_components()
        snake_case__ :Union[str, Any] = AudioLDMPipeline(**a_ )
        snake_case__ :Any = audioldm_pipe.to(a_ )
        audioldm_pipe.set_progress_bar_config(disable=a_ )
        snake_case__ :Optional[int] = self.get_dummy_inputs(a_ )
        snake_case__ :Optional[int] = audioldm_pipe(**a_ )
        snake_case__ :List[str] = output.audios[0]
        assert audio.ndim == 1
        assert len(a_ ) == 256
        snake_case__ :Union[str, Any] = audio[:10]
        snake_case__ :Union[str, Any] = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] )
        assert np.abs(audio_slice - expected_slice ).max() < 1E-2
    # Passing precomputed prompt embeddings must match passing the raw prompt.
    def lowerCAmelCase_ ( self ) -> int:
        snake_case__ :List[str] = self.get_dummy_components()
        snake_case__ :Optional[Any] = AudioLDMPipeline(**a_ )
        snake_case__ :List[Any] = audioldm_pipe.to(a_ )
        snake_case__ :Dict = audioldm_pipe.to(a_ )
        audioldm_pipe.set_progress_bar_config(disable=a_ )
        snake_case__ :List[Any] = self.get_dummy_inputs(a_ )
        snake_case__ :Tuple = 3 * [inputs["prompt"]]
        # forward
        snake_case__ :Tuple = audioldm_pipe(**a_ )
        snake_case__ :Optional[int] = output.audios[0]
        snake_case__ :List[Any] = self.get_dummy_inputs(a_ )
        snake_case__ :Optional[Any] = 3 * [inputs.pop("prompt" )]
        snake_case__ :Optional[Any] = audioldm_pipe.tokenizer(
            a_ ,padding="max_length" ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=a_ ,return_tensors="pt" ,)
        snake_case__ :Optional[Any] = text_inputs["input_ids"].to(a_ )
        snake_case__ :Union[str, Any] = audioldm_pipe.text_encoder(
            a_ ,)
        snake_case__ :str = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        snake_case__ :Optional[Any] = F.normalize(a_ ,dim=-1 )
        snake_case__ :Optional[Any] = prompt_embeds
        # forward
        snake_case__ :List[Any] = audioldm_pipe(**a_ )
        snake_case__ :Tuple = output.audios[0]
        assert np.abs(audio_a - audio_a ).max() < 1E-2
    # Same equivalence check, but with negative prompt embeddings as well.
    def lowerCAmelCase_ ( self ) -> Tuple:
        snake_case__ :List[Any] = self.get_dummy_components()
        snake_case__ :Tuple = AudioLDMPipeline(**a_ )
        snake_case__ :Dict = audioldm_pipe.to(a_ )
        snake_case__ :Any = audioldm_pipe.to(a_ )
        audioldm_pipe.set_progress_bar_config(disable=a_ )
        snake_case__ :Union[str, Any] = self.get_dummy_inputs(a_ )
        snake_case__ :Dict = 3 * ["this is a negative prompt"]
        snake_case__ :Optional[int] = negative_prompt
        snake_case__ :List[str] = 3 * [inputs["prompt"]]
        # forward
        snake_case__ :Optional[Any] = audioldm_pipe(**a_ )
        snake_case__ :Dict = output.audios[0]
        snake_case__ :Dict = self.get_dummy_inputs(a_ )
        snake_case__ :int = 3 * [inputs.pop("prompt" )]
        snake_case__ :Dict = []
        for p in [prompt, negative_prompt]:
            snake_case__ :Dict = audioldm_pipe.tokenizer(
                a_ ,padding="max_length" ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=a_ ,return_tensors="pt" ,)
            snake_case__ :str = text_inputs["input_ids"].to(a_ )
            snake_case__ :Optional[Any] = audioldm_pipe.text_encoder(
                a_ ,)
            snake_case__ :int = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            snake_case__ :List[str] = F.normalize(a_ ,dim=-1 )
            embeds.append(a_ )
        snake_case__ :List[str] = embeds
        # forward
        snake_case__ :Dict = audioldm_pipe(**a_ )
        snake_case__ :Optional[Any] = output.audios[0]
        assert np.abs(audio_a - audio_a ).max() < 1E-2
    # Negative prompt run with PNDM scheduler against a known slice.
    def lowerCAmelCase_ ( self ) -> Optional[int]:
        snake_case__ :int = "cpu"  # ensure determinism for the device-dependent torch.Generator
        snake_case__ :int = self.get_dummy_components()
        snake_case__ :Any = PNDMScheduler(skip_prk_steps=a_ )
        snake_case__ :List[Any] = AudioLDMPipeline(**a_ )
        snake_case__ :Union[str, Any] = audioldm_pipe.to(a_ )
        audioldm_pipe.set_progress_bar_config(disable=a_ )
        snake_case__ :Union[str, Any] = self.get_dummy_inputs(a_ )
        snake_case__ :List[str] = "egg cracking"
        snake_case__ :Tuple = audioldm_pipe(**a_ ,negative_prompt=a_ )
        snake_case__ :Tuple = output.audios[0]
        assert audio.ndim == 1
        assert len(a_ ) == 256
        snake_case__ :Optional[Any] = audio[:10]
        snake_case__ :List[str] = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] )
        assert np.abs(audio_slice - expected_slice ).max() < 1E-2
    # Output batching for num_waveforms_per_prompt x batch-of-prompts combinations.
    def lowerCAmelCase_ ( self ) -> str:
        snake_case__ :Tuple = "cpu"  # ensure determinism for the device-dependent torch.Generator
        snake_case__ :Optional[Any] = self.get_dummy_components()
        snake_case__ :int = PNDMScheduler(skip_prk_steps=a_ )
        snake_case__ :int = AudioLDMPipeline(**a_ )
        snake_case__ :Dict = audioldm_pipe.to(a_ )
        audioldm_pipe.set_progress_bar_config(disable=a_ )
        snake_case__ :List[str] = "A hammer hitting a wooden surface"
        # test num_waveforms_per_prompt=1 (default)
        snake_case__ :Any = audioldm_pipe(a_ ,num_inference_steps=2 ).audios
        assert audios.shape == (1, 256)
        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        snake_case__ :List[Any] = 2
        snake_case__ :Any = audioldm_pipe([prompt] * batch_size ,num_inference_steps=2 ).audios
        assert audios.shape == (batch_size, 256)
        # test num_waveforms_per_prompt for single prompt
        snake_case__ :List[str] = 2
        snake_case__ :Any = audioldm_pipe(a_ ,num_inference_steps=2 ,num_waveforms_per_prompt=a_ ).audios
        assert audios.shape == (num_waveforms_per_prompt, 256)
        # test num_waveforms_per_prompt for batch of prompts
        snake_case__ :Any = 2
        snake_case__ :Tuple = audioldm_pipe(
            [prompt] * batch_size ,num_inference_steps=2 ,num_waveforms_per_prompt=a_ ).audios
        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
    # `audio_length_in_s` must control the output waveform duration.
    def lowerCAmelCase_ ( self ) -> Tuple:
        snake_case__ :Tuple = "cpu"  # ensure determinism for the device-dependent torch.Generator
        snake_case__ :List[str] = self.get_dummy_components()
        snake_case__ :Optional[Any] = AudioLDMPipeline(**a_ )
        snake_case__ :List[str] = audioldm_pipe.to(a_ )
        audioldm_pipe.set_progress_bar_config(disable=a_ )
        snake_case__ :Optional[Any] = audioldm_pipe.vocoder.config.sampling_rate
        snake_case__ :Union[str, Any] = self.get_dummy_inputs(a_ )
        snake_case__ :Union[str, Any] = audioldm_pipe(audio_length_in_s=0.016 ,**a_ )
        snake_case__ :Union[str, Any] = output.audios[0]
        assert audio.ndim == 1
        assert len(a_ ) / vocoder_sampling_rate == 0.016
        snake_case__ :Dict = audioldm_pipe(audio_length_in_s=0.032 ,**a_ )
        snake_case__ :List[Any] = output.audios[0]
        assert audio.ndim == 1
        assert len(a_ ) / vocoder_sampling_rate == 0.032
    # Swapping in a vocoder with doubled mel channels keeps the waveform shape.
    def lowerCAmelCase_ ( self ) -> Optional[Any]:
        snake_case__ :List[str] = self.get_dummy_components()
        snake_case__ :Tuple = AudioLDMPipeline(**a_ )
        snake_case__ :Tuple = audioldm_pipe.to(a_ )
        audioldm_pipe.set_progress_bar_config(disable=a_ )
        snake_case__ :Dict = ["hey"]
        snake_case__ :int = audioldm_pipe(a_ ,num_inference_steps=1 )
        snake_case__ :str = output.audios.shape
        assert audio_shape == (1, 256)
        snake_case__ :Optional[int] = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        snake_case__ :Tuple = SpeechTaHifiGan(a_ ).to(a_ )
        snake_case__ :Optional[int] = audioldm_pipe(a_ ,num_inference_steps=1 )
        snake_case__ :Any = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)
    def lowerCAmelCase_ ( self ) -> str:
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=a_ )
    def lowerCAmelCase_ ( self ) -> int:
        self._test_inference_batch_single_identical(test_mean_pixel_difference=a_ )
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available() ,reason="XFormers attention is only available with CUDA and `xformers` installed" ,)
    def lowerCAmelCase_ ( self ) -> Dict:
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=a_ )
@slow
class _snake_case ( unittest.TestCase ):
    """Slow integration tests for the full `cvssp/audioldm` checkpoint.

    NOTE(review): as in the fast-test class above, `a_` is an undefined
    mangled placeholder; code preserved byte-for-byte.
    """
    def lowerCAmelCase_ ( self ) -> Optional[Any]:
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    # Deterministic inputs: fixed latents and a seeded generator.
    def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase="cpu" ,UpperCamelCase=torch.floataa ,UpperCamelCase=0 ) -> Dict:
        snake_case__ :Tuple = torch.Generator(device=a_ ).manual_seed(a_ )
        snake_case__ :Optional[Any] = np.random.RandomState(a_ ).standard_normal((1, 8, 128, 16) )
        snake_case__ :Dict = torch.from_numpy(a_ ).to(device=a_ ,dtype=a_ )
        snake_case__ :Optional[Any] = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs
    # Full-checkpoint run (default scheduler) against a known audio slice.
    def lowerCAmelCase_ ( self ) -> str:
        snake_case__ :str = AudioLDMPipeline.from_pretrained("cvssp/audioldm" )
        snake_case__ :Any = audioldm_pipe.to(a_ )
        audioldm_pipe.set_progress_bar_config(disable=a_ )
        snake_case__ :Tuple = self.get_inputs(a_ )
        snake_case__ :Dict = 25
        snake_case__ :Optional[Any] = audioldm_pipe(**a_ ).audios[0]
        assert audio.ndim == 1
        assert len(a_ ) == 81_920
        snake_case__ :Tuple = audio[77_230:77_240]
        snake_case__ :str = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
        snake_case__ :int = np.abs(expected_slice - audio_slice ).max()
        assert max_diff < 1E-2
    # Same checkpoint with the LMS discrete scheduler swapped in.
    def lowerCAmelCase_ ( self ) -> Dict:
        snake_case__ :int = AudioLDMPipeline.from_pretrained("cvssp/audioldm" )
        snake_case__ :Tuple = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
        snake_case__ :List[str] = audioldm_pipe.to(a_ )
        audioldm_pipe.set_progress_bar_config(disable=a_ )
        snake_case__ :int = self.get_inputs(a_ )
        snake_case__ :str = audioldm_pipe(**a_ ).audios[0]
        assert audio.ndim == 1
        assert len(a_ ) == 81_920
        snake_case__ :Optional[int] = audio[27_780:27_790]
        snake_case__ :Union[str, Any] = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
        snake_case__ :str = np.abs(expected_slice - audio_slice ).max()
        assert max_diff < 3E-2
| 706
|
from typing import Any
def lowercase_ ( observations_space: list , states_space: list , initial_probabilities: dict , transition_probabilities: dict , emission_probabilities: dict , ) -> list:
    """Viterbi algorithm: most probable sequence of hidden states for the observations.

    BUG fix: the original signature repeated the name `__snake_case` five times
    (a SyntaxError) while the body used the descriptive names below; the
    signature is restored so the function is callable.

    Returns the decoded list of states, one per observation.
    Raises ValueError for empty or malformed inputs (via `_validation`).
    """
    _validation(
        observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1 , len(observations_space ) ):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
    # The final observation
    final_observation = observations_space[len(observations_space ) - 1]
    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space ) - 1 , -1 , -1 ):
        result.append(previous )
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
def lowercase_ ( observations_space: Any , states_space: Any , initial_probabilities: Any , transition_probabilities: Any , emission_probabilities: Any , ) -> None:
    """Validate all Viterbi inputs: non-empty, correct list/dict structure.

    BUG fix: duplicated `__snake_case` parameter names (SyntaxError) restored.
    """
    _validate_not_empty(
        observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , )
    _validate_lists(observations_space , states_space )
    _validate_dicts(
        initial_probabilities , transition_probabilities , emission_probabilities )
def lowercase_ ( __snake_case : Any , __snake_case : Any , __snake_case : Any , __snake_case : Any , __snake_case : Any , ) -> None:
'''simple docstring'''
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError("There's an empty parameter" )
def lowercase_ ( observations_space: Any , states_space: Any ) -> None:
    """Validate that both spaces are lists of strings.

    BUG fix: duplicated `__snake_case` parameter names (SyntaxError) restored.
    """
    _validate_list(observations_space , "observations_space" )
    _validate_list(states_space , "states_space" )
def lowercase_ ( __snake_case : Any , __snake_case : str ) -> None:
'''simple docstring'''
if not isinstance(_object , __snake_case ):
snake_case__ :Optional[int] = F'{var_name} must be a list'
raise ValueError(__snake_case )
else:
for x in _object:
if not isinstance(__snake_case , __snake_case ):
snake_case__ :Any = F'{var_name} must be a list of strings'
raise ValueError(__snake_case )
def lowercase_ ( initial_probabilities: Any , transition_probabilities: Any , emission_probabilities: Any , ) -> None:
    """Validate the three probability tables (flat dict of floats, two nested dicts).

    BUG fix: duplicated `__snake_case` parameter names (SyntaxError) and a
    placeholder passed where the `float` value type was intended.
    """
    _validate_dict(initial_probabilities , "initial_probabilities" , float )
    _validate_nested_dict(transition_probabilities , "transition_probabilities" )
    _validate_nested_dict(emission_probabilities , "emission_probabilities" )
def lowercase_ ( _object: Any , var_name: str ) -> None:
    """Validate a dict of dicts whose inner values are floats.

    BUG fix: placeholder names passed as the `var_name`/`value_type` arguments
    restored to the intended values.
    """
    _validate_dict(_object , var_name , dict )
    for x in _object.values():
        _validate_dict(x , var_name , float , True )
def lowercase_ ( __snake_case : Any , __snake_case : str , __snake_case : type , __snake_case : bool = False ) -> None:
'''simple docstring'''
if not isinstance(_object , __snake_case ):
snake_case__ :str = F'{var_name} must be a dict'
raise ValueError(__snake_case )
if not all(isinstance(__snake_case , __snake_case ) for x in _object ):
snake_case__ :List[Any] = F'{var_name} all keys must be strings'
raise ValueError(__snake_case )
if not all(isinstance(__snake_case , __snake_case ) for x in _object.values() ):
snake_case__ :Optional[int] = "nested dictionary " if nested else ""
snake_case__ :int = F'{var_name} {nested_text}all values must be {value_type.__name__}'
raise ValueError(__snake_case )
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 57
| 0
|
from ..utils import DummyObject, requires_backends
class _snake_case ( metaclass=_A ):
    """Dummy placeholder object raising an informative error when its backends are missing.

    BUG fix: each method declared `*UpperCamelCase, **UpperCamelCase` — the same
    name for varargs and kwargs is a SyntaxError; restored to `*args, **kwargs`.
    NOTE(review): the two classmethods share the mangled name `lowerCAmelCase_`,
    so the second shadows the first; left as found.
    """
    # Backends this dummy stands in for.
    _A = ['transformers', 'torch', 'note_seq']

    def __init__( self ,*args ,**kwargs ) -> Optional[int]:
        requires_backends(self ,["transformers", "torch", "note_seq"] )

    @classmethod
    def lowerCAmelCase_ ( cls ,*args ,**kwargs ) -> Optional[Any]:
        requires_backends(cls ,["transformers", "torch", "note_seq"] )

    @classmethod
    def lowerCAmelCase_ ( cls ,*args ,**kwargs ) -> Dict:
        requires_backends(cls ,["transformers", "torch", "note_seq"] )
| 707
|
def lowercase_ ( txt: str ) -> list:
    """Return every variant of `txt` with exactly one alphabetic character upper-cased.

    BUG fix: the parameter was named `__snake_case` while the body indexed `txt`
    (NameError); the parameter is restored to `txt`.

    Non-alphabetic positions are skipped; an empty string yields an empty list.
    """
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt ) )
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
    # Execute the embedded doctests when run as a script.
    import doctest

    doctest.testmod()
| 57
| 0
|
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
__UpperCAmelCase : Dict = {
# 1536-bit
5: {
'prime': int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=1_6,
),
'generator': 2,
},
# 2048-bit
1_4: {
'prime': int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=1_6,
),
'generator': 2,
},
# 3072-bit
1_5: {
'prime': int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=1_6,
),
'generator': 2,
},
# 4096-bit
1_6: {
'prime': int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=1_6,
),
'generator': 2,
},
# 6144-bit
1_7: {
'prime': int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=1_6,
),
'generator': 2,
},
# 8192-bit
1_8: {
'prime': int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=1_6,
),
'generator': 2,
},
}
class _snake_case :
    # Diffie-Hellman key exchange over the RFC 3526 MODP groups defined in the
    # `primes`-style dict above (group number -> {'prime', 'generator'}).
    # NOTE(review): this class is heavily name-mangled — `group`, `primes`,
    # `snake_case__`, `_SCREAMING_SNAKE_CASE`, `self.prime`, `self.generator`,
    # `self.__private_key`, `DiffieHellman.is_valid_public_key_static` and
    # `shaaaa` (presumably hashlib.sha256) are all unresolved as written, so
    # the code cannot run in its current form.  Comments below describe the
    # apparent intent; confirm against the original implementation.
    def __init__( self ,UpperCamelCase = 14 ) -> None:
        # UpperCamelCase: RFC 3526 group number (default 14 = 2048-bit MODP).
        if group not in primes:
            raise ValueError("Unsupported Group" )
        # Prime modulus and generator for the chosen group.
        snake_case__ :Tuple = primes[group]["prime"]
        snake_case__ :Union[str, Any] = primes[group]["generator"]
        # 256-bit private key from the OS CSPRNG (32 random bytes, hex-decoded).
        snake_case__ :int = int(hexlify(urandom(32 ) ) ,base=16 )
    def lowerCAmelCase_ ( self ) -> str:
        # Hex string of the private key, without the "0x" prefix.
        return hex(self.__private_key )[2:]
    def lowerCAmelCase_ ( self ) -> str:
        # Public key: generator ** private_key mod prime, as a hex string.
        snake_case__ :Optional[int] = pow(self.generator ,self.__private_key ,self.prime )
        return hex(_SCREAMING_SNAKE_CASE )[2:]
    def lowerCAmelCase_ ( self ,UpperCamelCase ) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        # (range check plus a Legendre-symbol style subgroup check).
        return (
            2 <= key <= self.prime - 2
            and pow(_SCREAMING_SNAKE_CASE ,(self.prime - 1) // 2 ,self.prime ) == 1
        )
    def lowerCAmelCase_ ( self ,UpperCamelCase ) -> str:
        # Derive the shared secret from the peer's hex public key and hash it.
        snake_case__ :List[Any] = int(_SCREAMING_SNAKE_CASE ,base=16 )
        if not self.is_valid_public_key(_SCREAMING_SNAKE_CASE ):
            raise ValueError("Invalid public key" )
        snake_case__ :List[Any] = pow(_SCREAMING_SNAKE_CASE ,self.__private_key ,self.prime )
        # NOTE(review): `shaaaa` is imported from hashlib above but hashlib has
        # no such attribute — presumably sha256; confirm.
        return shaaaa(str(_SCREAMING_SNAKE_CASE ).encode() ).hexdigest()
    @staticmethod
    def lowerCAmelCase_ ( UpperCamelCase ,UpperCamelCase ) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        # (static variant taking the key and the prime explicitly).
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(_SCREAMING_SNAKE_CASE ,(prime - 1) // 2 ,_SCREAMING_SNAKE_CASE ) == 1
        )
    @staticmethod
    def lowerCAmelCase_ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase = 14 ) -> str:
        # Static shared-key derivation: (local private hex, remote public hex,
        # group number) -> hex digest of the shared secret.
        snake_case__ :List[str] = int(_SCREAMING_SNAKE_CASE ,base=16 )
        snake_case__ :Any = int(_SCREAMING_SNAKE_CASE ,base=16 )
        snake_case__ :Optional[Any] = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
            raise ValueError("Invalid public key" )
        snake_case__ :List[str] = pow(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
        return shaaaa(str(_SCREAMING_SNAKE_CASE ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 708
|
def lowercase_ ( __snake_case : int = 10_00 ) -> int:
    """Return the sum of all multiples of 3 or 5 below ``__snake_case``.

    Project Euler problem 1.  With the default limit of 1000 the answer
    is 233168.

    Fixes applied: the original body read the never-assigned names ``n``,
    ``a`` and ``result`` (the mangled assignments bound ``snake_case__``
    instead), raising NameError on any call; and the ``elif a % 15 == 0``
    branch was unreachable dead code, since every multiple of 15 is already
    a multiple of 3 and taken by the first branch.
    """
    n = __snake_case
    # 0, 1 and 2 contribute nothing (0 is a multiple of 3 but adds 0).
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result
if __name__ == "__main__":
print(F'''{solution() = }''')
| 57
| 0
|
def lowercase_ ( sentence : str ) -> str:
    """Reverse every word longer than four characters in ``sentence``.

    Words are whitespace-separated; words of length four or less are kept
    as-is, and single spaces join the result.

    >>> lowercase_("Hey wollef sroirraw")
    'Hey fellow warriors'

    Fixes applied: the original body read the undefined names ``A_`` and
    ``sentence`` (its parameter was named ``__snake_case``), raising
    NameError on any call; the parameter is now named to match the body and
    the length test uses the loop variable ``word``.  The annotations
    (``Optional[Any] -> Optional[int]``) were also wrong for a function that
    takes and returns a string.
    """
    return " ".join(
        "".join(word[::-1] ) if len(word ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("Hey wollef sroirraw"))
| 709
|
import os
import sys
import unittest
# Repository root: three directories above this test file.
# NOTE(review): the result is bound to `__UpperCAmelCase` but the next line
# reads `git_repo_path` — a mangling artifact; the two names were presumably
# one variable in the original.  Confirm before running.
__UpperCAmelCase : str = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
# Make the repo's `utils` directory importable so check_dummies resolves.
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__UpperCAmelCase : Tuple = os.path.join(git_repo_path, "src", "diffusers")
class _snake_case ( unittest.TestCase ):
    # Unit tests for the `check_dummies` maintenance script: backend
    # detection from import-guard lines, init parsing, and generation of
    # dummy (backend-unavailable) objects/files.
    # NOTE(review): all four methods share the mangled name `lowerCAmelCase_`,
    # so only the last definition survives as an attribute — unittest would
    # discover none of them as written.  Presumably they had distinct
    # `test_*` names originally; confirm against the real test file.
    def lowerCAmelCase_ ( self ) -> Union[str, Any]:
        # find_backend maps an availability-guard line to a backend name.
        snake_case__ :Tuple = find_backend(" if not is_torch_available():" )
        self.assertEqual(UpperCamelCase ,"torch" )
        # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")
        # Compound guards join backends with "_and_".
        snake_case__ :Tuple = find_backend(" if not (is_torch_available() and is_transformers_available()):" )
        self.assertEqual(UpperCamelCase ,"torch_and_transformers" )
        # double_backend_with_underscore = find_backend(
        #     " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        snake_case__ :str = find_backend(
            " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):" )
        self.assertEqual(UpperCamelCase ,"torch_and_transformers_and_onnx" )
    def lowerCAmelCase_ ( self ) -> str:
        # read_init returns a mapping of backend name -> exported objects.
        # NOTE(review): assertions below read `objects`, which is never
        # assigned here (mangled to `snake_case__`) — confirm.
        snake_case__ :int = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch" ,UpperCamelCase )
        self.assertIn("torch_and_transformers" ,UpperCamelCase )
        self.assertIn("flax_and_transformers" ,UpperCamelCase )
        self.assertIn("torch_and_transformers_and_onnx" ,UpperCamelCase )
        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel" ,objects["torch"] )
        self.assertIn("FlaxUNet2DConditionModel" ,objects["flax"] )
        self.assertIn("StableDiffusionPipeline" ,objects["torch_and_transformers"] )
        self.assertIn("FlaxStableDiffusionPipeline" ,objects["flax_and_transformers"] )
        self.assertIn("LMSDiscreteScheduler" ,objects["torch_and_scipy"] )
        self.assertIn("OnnxStableDiffusionPipeline" ,objects["torch_and_transformers_and_onnx"] )
    def lowerCAmelCase_ ( self ) -> Any:
        # create_dummy_object emits placeholder source for a constant,
        # a function, and a class guarded by requires_backends.
        snake_case__ :Union[str, Any] = create_dummy_object("CONSTANT" ,"'torch'" )
        self.assertEqual(UpperCamelCase ,"\nCONSTANT = None\n" )
        snake_case__ :Optional[Any] = create_dummy_object("function" ,"'torch'" )
        self.assertEqual(
            UpperCamelCase ,"\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n" )
        snake_case__ :str = "\nclass FakeClass(metaclass=DummyObject):\n    _backends = 'torch'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, 'torch')\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, 'torch')\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, 'torch')\n"
        snake_case__ :List[str] = create_dummy_object("FakeClass" ,"'torch'" )
        self.assertEqual(UpperCamelCase ,UpperCamelCase )
    def lowerCAmelCase_ ( self ) -> List[Any]:
        # create_dummy_files produces whole dummy module files per backend.
        # NOTE(review): `dummy_files` in the final assertion is never
        # assigned (mangled) — confirm.
        snake_case__ :Tuple = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = [\"torch\"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, [\"torch\"])\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, [\"torch\"])\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, [\"torch\"])\n"
        snake_case__ :int = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} )
        self.assertEqual(dummy_files["torch"] ,UpperCamelCase )
| 57
| 0
|
def lowercase_ ( __snake_case : list ) -> float:
    """Return the minimum total cost of merging all given file sizes.

    Optimal merge pattern: repeatedly merge the two smallest files; one
    merge costs the sum of the two sizes, and the merged file re-enters the
    pool.  The total of all merge costs is minimal under this greedy rule.

    Args:
        __snake_case: sequence of file sizes (numbers).

    Returns:
        The minimal total merge cost; 0 for zero or one file.

    Fixes applied: the original read the never-assigned names ``files``,
    ``optimal_merge_cost``, ``temp`` and ``min_index`` (all mangled to
    ``snake_case__``), raising NameError on any call; the ``int`` parameter
    annotation was wrong for a list input; and the function now works on a
    copy so the caller's list is not destroyed.
    """
    files = list(__snake_case )  # copy: we pop from / append to this pool
    optimal_merge_cost = 0
    while len(files ) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2 ):
            min_index = files.index(min(files ) )
            temp += files[min_index]
            files.pop(min_index )
        files.append(temp )
        optimal_merge_cost += temp
    return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
| 710
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# Lazy-import scaffolding for the BARTpho tokenizer: the module is only
# materialized when an attribute is first accessed, and the sentencepiece
# dependency is optional.
# NOTE(review): `_import_structure` passed to _LazyModule below is never
# defined — the dict was mangled to `__UpperCAmelCase`, which is then
# clobbered by the list assignment in the `else:` branch.  In the original
# this is presumably `_import_structure = {}` followed by
# `_import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]`; confirm.
__UpperCAmelCase : Tuple = {}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # sentencepiece missing: expose nothing (dummy objects live elsewhere).
    pass
else:
    __UpperCAmelCase : List[Any] = ["BartphoTokenizer"]
if TYPE_CHECKING:
    # Static type checkers get the real import; runtime stays lazy.
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    import sys
    __UpperCAmelCase : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 57
| 0
|
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class _snake_case ( _A , _A , unittest.TestCase ):
    # Model tests for diffusers' AutoencoderKL (VAE): dummy inputs, config,
    # gradient checkpointing equivalence, pretrained loading, and a
    # deterministic output-slice check.
    # NOTE(review): the bases `_A , _A` and the three `_A = ...` attributes
    # are mangling artifacts (presumably ModelTesterMixin/UNetTesterMixin and
    # `model_class` / `main_input_name` / `base_precision`), and every method
    # shares the name `lowerCAmelCase_`, so only the last survives; names
    # like `batch_size`, `image`, `init_dict`, `loss`, `__a` are likewise
    # unresolved as written.  Comments describe apparent intent only.
    _A = AutoencoderKL
    _A = 'sample'
    _A = 1e-2
    @property
    def lowerCAmelCase_ ( self ) -> Dict:
        # Dummy input: a random (4, 3, 32, 32) image batch on the test device.
        snake_case__ :Dict = 4
        snake_case__ :List[Any] = 3
        snake_case__ :Any = (32, 32)
        snake_case__ :Optional[int] = floats_tensor((batch_size, num_channels) + sizes ).to(__a )
        return {"sample": image}
    @property
    def lowerCAmelCase_ ( self ) -> Union[str, Any]:
        # Input shape (C, H, W).
        return (3, 32, 32)
    @property
    def lowerCAmelCase_ ( self ) -> Optional[int]:
        # Output shape (C, H, W).
        return (3, 32, 32)
    def lowerCAmelCase_ ( self ) -> Any:
        # Minimal AutoencoderKL config plus matching dummy inputs.
        snake_case__ :Optional[int] = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        snake_case__ :List[Any] = self.dummy_input
        return init_dict, inputs_dict
    def lowerCAmelCase_ ( self ) -> Union[str, Any]:
        # Intentionally skipped base-class test.
        pass
    def lowerCAmelCase_ ( self ) -> str:
        # Intentionally skipped base-class test.
        pass
    @unittest.skipIf(torch_device == "mps" ,"Gradient checkpointing skipped on MPS" )
    def lowerCAmelCase_ ( self ) -> List[Any]:
        # Gradient checkpointing must give the same loss and gradients as the
        # plain forward/backward pass.
        snake_case__ , snake_case__ :Optional[int] = self.prepare_init_args_and_inputs_for_common()
        snake_case__ :Optional[Any] = self.model_class(**__a )
        model.to(__a )
        assert not model.is_gradient_checkpointing and model.training
        snake_case__ :Tuple = model(**__a ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()
        snake_case__ :List[Any] = torch.randn_like(__a )
        snake_case__ :Optional[Any] = (out - labels).mean()
        loss.backward()
        # re-instantiate the model now enabling gradient checkpointing
        snake_case__ :List[str] = self.model_class(**__a )
        # clone model
        model_a.load_state_dict(model.state_dict() )
        model_a.to(__a )
        model_a.enable_gradient_checkpointing()
        assert model_a.is_gradient_checkpointing and model_a.training
        snake_case__ :Optional[Any] = model_a(**__a ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_a.zero_grad()
        snake_case__ :List[Any] = (out_a - labels).mean()
        loss_a.backward()
        # compare the output and parameters gradients
        self.assertTrue((loss - loss_a).abs() < 1E-5 )
        snake_case__ :Tuple = dict(model.named_parameters() )
        snake_case__ :Union[str, Any] = dict(model_a.named_parameters() )
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data ,named_params_a[name].grad.data ,atol=5E-5 ) )
    def lowerCAmelCase_ ( self ) -> Union[str, Any]:
        # Loading a pretrained dummy VAE reports no missing keys and runs.
        snake_case__ , snake_case__ :int = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" ,output_loading_info=__a )
        self.assertIsNotNone(__a )
        self.assertEqual(len(loading_info["missing_keys"] ) ,0 )
        model.to(__a )
        snake_case__ :Union[str, Any] = model(**self.dummy_input )
        assert image is not None, "Make sure output is not None"
    def lowerCAmelCase_ ( self ) -> Optional[int]:
        # Seeded forward pass; output slice must match per-device references.
        snake_case__ :Union[str, Any] = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" )
        snake_case__ :Optional[int] = model.to(__a )
        model.eval()
        # MPS generators cannot be device-bound the same way as CUDA/CPU.
        if torch_device == "mps":
            snake_case__ :Tuple = torch.manual_seed(0 )
        else:
            snake_case__ :Tuple = torch.Generator(device=__a ).manual_seed(0 )
        snake_case__ :Union[str, Any] = torch.randn(
            1 ,model.config.in_channels ,model.config.sample_size ,model.config.sample_size ,generator=torch.manual_seed(0 ) ,)
        snake_case__ :List[str] = image.to(__a )
        with torch.no_grad():
            snake_case__ :Dict = model(__a ,sample_posterior=__a ,generator=__a ).sample
        snake_case__ :Any = output[0, -1, -3:, -3:].flatten().cpu()
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            snake_case__ :Any = torch.tensor(
                [
                    -4.0_078E-01,
                    -3.8_323E-04,
                    -1.2_681E-01,
                    -1.1_462E-01,
                    2.0_095E-01,
                    1.0_893E-01,
                    -8.8_247E-02,
                    -3.0_361E-01,
                    -9.8_644E-03,
                ] )
        elif torch_device == "cpu":
            snake_case__ :Dict = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
        else:
            snake_case__ :List[Any] = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )
        self.assertTrue(torch_all_close(__a ,__a ,rtol=1E-2 ) )
@slow
class _snake_case ( unittest.TestCase ):
    # Slow integration tests for Stable Diffusion's pretrained AutoencoderKL:
    # forward, fp16, decode, xformers attention parity, and encode, each
    # checked against hard-coded reference slices via @parameterized seeds.
    # NOTE(review): all methods share the mangled name `lowerCAmelCase_`
    # (only the last survives), and names such as `seed`, `shape`, `fpaa`,
    # `__a`, `model`, `image`, `sample` are unresolved as written — they are
    # mangling artifacts of the original test file.  Comments describe
    # apparent intent only.
    def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> Optional[Any]:
        # Fixture filename for a cached gaussian-noise numpy array.
        return f'gaussian_noise_s={seed}_shape={"_".join([str(__a ) for s in shape] )}.npy'
    def lowerCAmelCase_ ( self ) -> Optional[int]:
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def lowerCAmelCase_ ( self ,UpperCamelCase=0 ,UpperCamelCase=(4, 3, 512, 512) ,UpperCamelCase=False ) -> str:
        # Load a seeded noise image from the HF hub cache, optionally fp16.
        snake_case__ :int = torch.floataa if fpaa else torch.floataa
        snake_case__ :Optional[int] = torch.from_numpy(load_hf_numpy(self.get_file_format(__a ,__a ) ) ).to(__a ).to(__a )
        return image
    def lowerCAmelCase_ ( self ,UpperCamelCase="CompVis/stable-diffusion-v1-4" ,UpperCamelCase=False ) -> Union[str, Any]:
        # Load the SD VAE subfolder in eval mode, optionally the fp16 revision.
        snake_case__ :List[Any] = "fp16" if fpaa else None
        snake_case__ :int = torch.floataa if fpaa else torch.floataa
        snake_case__ :Tuple = AutoencoderKL.from_pretrained(
            __a ,subfolder="vae" ,torch_dtype=__a ,revision=__a ,)
        model.to(__a ).eval()
        return model
    def lowerCAmelCase_ ( self ,UpperCamelCase=0 ) -> Tuple:
        # Seeded RNG on the test device (MPS needs the global generator).
        if torch_device == "mps":
            return torch.manual_seed(__a )
        return torch.Generator(device=__a ).manual_seed(__a )
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ] )
    def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Any:
        # Stochastic forward pass (sample_posterior) against reference slice.
        snake_case__ :Dict = self.get_sd_vae_model()
        snake_case__ :Optional[int] = self.get_sd_image(__a )
        snake_case__ :Union[str, Any] = self.get_generator(__a )
        with torch.no_grad():
            snake_case__ :Optional[Any] = model(__a ,generator=__a ,sample_posterior=__a ).sample
        assert sample.shape == image.shape
        snake_case__ :Tuple = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        snake_case__ :List[Any] = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
        assert torch_all_close(__a ,__a ,atol=3E-3 )
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
            [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
            # fmt: on
        ] )
    @require_torch_gpu
    def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> List[Any]:
        # Same stochastic forward pass in fp16 (GPU only), looser tolerance.
        snake_case__ :str = self.get_sd_vae_model(fpaa=__a )
        snake_case__ :Optional[int] = self.get_sd_image(__a ,fpaa=__a )
        snake_case__ :Optional[Any] = self.get_generator(__a )
        with torch.no_grad():
            snake_case__ :Optional[Any] = model(__a ,generator=__a ,sample_posterior=__a ).sample
        assert sample.shape == image.shape
        snake_case__ :Dict = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        snake_case__ :List[Any] = torch.tensor(__a )
        assert torch_all_close(__a ,__a ,atol=1E-2 )
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ] )
    def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Union[str, Any]:
        # Deterministic forward pass (mode, no posterior sampling).
        snake_case__ :Dict = self.get_sd_vae_model()
        snake_case__ :Tuple = self.get_sd_image(__a )
        with torch.no_grad():
            snake_case__ :Optional[int] = model(__a ).sample
        assert sample.shape == image.shape
        snake_case__ :str = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        snake_case__ :Union[str, Any] = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
        assert torch_all_close(__a ,__a ,atol=3E-3 )
    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
            [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
            # fmt: on
        ] )
    @require_torch_gpu
    def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> Union[str, Any]:
        # Decode a (3, 4, 64, 64) latent to (3, 3, 512, 512) pixels.
        snake_case__ :str = self.get_sd_vae_model()
        snake_case__ :List[Any] = self.get_sd_image(__a ,shape=(3, 4, 64, 64) )
        with torch.no_grad():
            snake_case__ :Tuple = model.decode(__a ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        snake_case__ :List[Any] = sample[-1, -2:, :2, -2:].flatten().cpu()
        snake_case__ :int = torch.tensor(__a )
        assert torch_all_close(__a ,__a ,atol=1E-3 )
    @parameterized.expand(
        [
            # fmt: off
            [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
            [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
            # fmt: on
        ] )
    @require_torch_gpu
    def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> Optional[Any]:
        # Same latent decode in fp16, looser tolerance.
        snake_case__ :str = self.get_sd_vae_model(fpaa=__a )
        snake_case__ :Tuple = self.get_sd_image(__a ,shape=(3, 4, 64, 64) ,fpaa=__a )
        with torch.no_grad():
            snake_case__ :int = model.decode(__a ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        snake_case__ :List[Any] = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        snake_case__ :Optional[int] = torch.tensor(__a )
        assert torch_all_close(__a ,__a ,atol=5E-3 )
    @parameterized.expand([(13,), (16,), (27,)] )
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available() ,reason="xformers is not required when using PyTorch 2.0." )
    def lowerCAmelCase_ ( self ,UpperCamelCase ) -> str:
        # fp16 decode must match with and without xformers attention.
        snake_case__ :Optional[Any] = self.get_sd_vae_model(fpaa=__a )
        snake_case__ :str = self.get_sd_image(__a ,shape=(3, 4, 64, 64) ,fpaa=__a )
        with torch.no_grad():
            snake_case__ :Any = model.decode(__a ).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            snake_case__ :Union[str, Any] = model.decode(__a ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        assert torch_all_close(__a ,__a ,atol=1E-1 )
    @parameterized.expand([(13,), (16,), (37,)] )
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available() ,reason="xformers is not required when using PyTorch 2.0." )
    def lowerCAmelCase_ ( self ,UpperCamelCase ) -> int:
        # fp32 decode must match with and without xformers attention.
        snake_case__ :List[Any] = self.get_sd_vae_model()
        snake_case__ :Dict = self.get_sd_image(__a ,shape=(3, 4, 64, 64) )
        with torch.no_grad():
            snake_case__ :int = model.decode(__a ).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            snake_case__ :Dict = model.decode(__a ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        assert torch_all_close(__a ,__a ,atol=1E-2 )
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
            [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
            # fmt: on
        ] )
    def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> List[Any]:
        # Encode pixels to a latent distribution and sample; latents are
        # 1/8 the spatial size with 4 channels.
        snake_case__ :str = self.get_sd_vae_model()
        snake_case__ :str = self.get_sd_image(__a )
        snake_case__ :Tuple = self.get_generator(__a )
        with torch.no_grad():
            snake_case__ :str = model.encode(__a ).latent_dist
            snake_case__ :List[str] = dist.sample(generator=__a )
        assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
        snake_case__ :List[Any] = sample[0, -1, -3:, -3:].flatten().cpu()
        snake_case__ :Dict = torch.tensor(__a )
        snake_case__ :str = 3E-3 if torch_device != "mps" else 1E-2
        assert torch_all_close(__a ,__a ,atol=__a )
| 711
|
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _snake_case ( unittest.TestCase ):
    # Offline/robustness tests for tokenizer loading: cached loading under a
    # simulated server outage, and deprecated direct-URL / raw-file loading.
    # NOTE(review): all four methods share the mangled name `lowerCAmelCase_`,
    # so only the last definition survives as an attribute — presumably they
    # had distinct `test_*` names originally.
    def lowerCAmelCase_ ( self ) -> List[Any]:
        # A mock response for an HTTP head request to emulate server down
        snake_case__ :Tuple = mock.Mock()
        snake_case__ :List[str] = 500
        snake_case__ :Any = {}
        snake_case__ :Union[str, Any] = HTTPError
        snake_case__ :Tuple = {}
        # Download this model to make sure it's in the cache.
        snake_case__ :Any = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request" ,return_value=UpperCamelCase ) as mock_head:
            snake_case__ :Dict = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
            # This check we did call the fake head request
            mock_head.assert_called()
    @require_tokenizers
    def lowerCAmelCase_ ( self ) -> Dict:
        # A mock response for an HTTP head request to emulate server down
        snake_case__ :Union[str, Any] = mock.Mock()
        snake_case__ :int = 500
        snake_case__ :Any = {}
        snake_case__ :Dict = HTTPError
        snake_case__ :List[Any] = {}
        # Download this model to make sure it's in the cache.
        snake_case__ :Optional[int] = GPTaTokenizerFast.from_pretrained("gpt2" )
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request" ,return_value=UpperCamelCase ) as mock_head:
            snake_case__ :Any = GPTaTokenizerFast.from_pretrained("gpt2" )
            # This check we did call the fake head request
            mock_head.assert_called()
    def lowerCAmelCase_ ( self ) -> int:
        # This test is for deprecated behavior and can be removed in v5
        # Loading a tokenizer from a bare downloaded vocab/model file.
        try:
            snake_case__ :Union[str, Any] = tempfile.mktemp()
            with open(UpperCamelCase ,"wb" ) as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" ,UpperCamelCase )
            snake_case__ :Tuple = AlbertTokenizer.from_pretrained(UpperCamelCase )
        finally:
            os.remove(UpperCamelCase )
        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile("tokenizer.json" ):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open("tokenizer.json" ,"wb" ) as f:
                http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json" ,UpperCamelCase )
            snake_case__ :Dict = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size ,1_000 )
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove("tokenizer.json" )
    def lowerCAmelCase_ ( self ) -> Union[str, Any]:
        # This test is for deprecated behavior and can be removed in v5
        # Loading a tokenizer directly from a resolve URL.
        snake_case__ :Union[str, Any] = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" )
@is_staging_test
class _snake_case ( unittest.TestCase ):
    """Staging-Hub integration tests: push tokenizers to the Hub and reload them."""

    # Minimal WordPiece vocabulary used to build the tokenizers pushed to the Hub.
    # Renamed from the obfuscated `_A`: the test methods read `self.vocab_tokens`.
    vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']

    @classmethod
    def lowerCAmelCase_ ( cls ) -> Optional[int]:
        # Class-level setup: remember the staging token (read later via
        # cls._token / self._token) and log in once for the whole class.
        # NOTE(review): behaves as unittest's setUpClass; it must carry that
        # name for unittest to invoke it automatically — confirm upstream.
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )

    @classmethod
    def lowerCAmelCase_ ( cls ) -> Union[str, Any]:
        # Class-level teardown: best-effort deletion of every repo the tests
        # may have created; a missing repo (HTTPError) is ignored.
        try:
            delete_repo(token=cls._token ,repo_id="test-tokenizer" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token ,repo_id="valid_org/test-tokenizer-org" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token ,repo_id="test-dynamic-tokenizer" )
        except HTTPError:
            pass

    def lowerCAmelCase_ ( self ) -> Optional[Any]:
        # Push a slow BertTokenizer to a user repo via push_to_hub, reload it
        # and compare vocabs; then repeat via save_pretrained(push_to_hub=True).
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir ,"vocab.txt" )
            with open(vocab_file ,"w" ,encoding="utf-8" ) as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
            tokenizer = BertTokenizer(vocab_file )
        tokenizer.push_to_hub("test-tokenizer" ,use_auth_token=self._token )
        new_tokenizer = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
        self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
        # Reset repo
        delete_repo(token=self._token ,repo_id="test-tokenizer" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir ,repo_id="test-tokenizer" ,push_to_hub=True ,use_auth_token=self._token )
        new_tokenizer = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
        self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )

    def lowerCAmelCase_ ( self ) -> Optional[int]:
        # Same round-trip as above but pushing into an organization namespace.
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir ,"vocab.txt" )
            with open(vocab_file ,"w" ,encoding="utf-8" ) as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
            tokenizer = BertTokenizer(vocab_file )
        tokenizer.push_to_hub("valid_org/test-tokenizer-org" ,use_auth_token=self._token )
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
        self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
        # Reset repo
        delete_repo(token=self._token ,repo_id="valid_org/test-tokenizer-org" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir ,repo_id="valid_org/test-tokenizer-org" ,push_to_hub=True ,use_auth_token=self._token )
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
        self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )

    @require_tokenizers
    def lowerCAmelCase_ ( self ) -> Any:
        # Push custom (dynamic-module) slow and fast tokenizers and verify that
        # AutoTokenizer reloads them under trust_remote_code.
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir ,"vocab.txt" )
            with open(vocab_file ,"w" ,encoding="utf-8" ) as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
            tokenizer = CustomTokenizer(vocab_file )
        # No fast custom tokenizer
        tokenizer.push_to_hub("test-dynamic-tokenizer" ,use_auth_token=self._token )
        tokenizer = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=True )
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__ ,"CustomTokenizer" )
        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir ,"vocab.txt" )
            with open(vocab_file ,"w" ,encoding="utf-8" ) as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir )
            bert_tokenizer.save_pretrained(tmp_dir )
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir )
        tokenizer.push_to_hub("test-dynamic-tokenizer" ,use_auth_token=self._token )
        tokenizer = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=True )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__ ,"CustomTokenizerFast" )
        tokenizer = AutoTokenizer.from_pretrained(
            f'{USER}/test-dynamic-tokenizer' ,use_fast=False ,trust_remote_code=True )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__ ,"CustomTokenizer" )
class _snake_case ( unittest.TestCase ):
    """Unit tests for the tokenizers' Trie (added-token splitting).

    The original obfuscated code assigned each Trie to a throwaway
    `snake_case__` local while the assertions read `trie`, which raised
    NameError; a single consistent local name restores the tests.
    """

    def lowerCAmelCase_ ( self ) -> List[Any]:
        # add() builds a nested character dict terminated by {"": 1}.
        trie = Trie()
        trie.add("Hello 友達" )
        self.assertEqual(trie.data ,{"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}} )
        trie.add("Hello" )
        trie.data
        self.assertEqual(trie.data ,{"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}} )

    def lowerCAmelCase_ ( self ) -> int:
        # With no tokens added, split() returns the whole text unchanged.
        trie = Trie()
        self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) ,["[CLS] This is a extra_id_100"] )
        trie.add("[CLS]" )
        trie.add("extra_id_1" )
        trie.add("extra_id_100" )
        self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) ,["[CLS]", " This is a ", "extra_id_100"] )

    def lowerCAmelCase_ ( self ) -> str:
        # Single-character tokens split at both string ends.
        trie = Trie()
        trie.add("A" )
        self.assertEqual(trie.split("ABC" ) ,["A", "BC"] )
        self.assertEqual(trie.split("BCA" ) ,["BC", "A"] )

    def lowerCAmelCase_ ( self ) -> Dict:
        # A token that is a suffix of another must not shadow the longer match.
        trie = Trie()
        trie.add("TOKEN]" )
        trie.add("[SPECIAL_TOKEN]" )
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) ,["This is something ", "[SPECIAL_TOKEN]"] )

    def lowerCAmelCase_ ( self ) -> Tuple:
        # Unrelated single-char tokens don't interfere with a longer token.
        trie = Trie()
        trie.add("A" )
        trie.add("P" )
        trie.add("[SPECIAL_TOKEN]" )
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) ,["This is something ", "[SPECIAL_TOKEN]"] )

    def lowerCAmelCase_ ( self ) -> Tuple:
        # Overlapping matches: the earlier, longer match wins.
        trie = Trie()
        trie.add("AB" )
        trie.add("B" )
        trie.add("C" )
        self.assertEqual(trie.split("ABC" ) ,["AB", "C"] )

    def lowerCAmelCase_ ( self ) -> Union[str, Any]:
        trie = Trie()
        trie.add("ABC" )
        trie.add("B" )
        trie.add("CD" )
        self.assertEqual(trie.split("ABCD" ) ,["ABC", "D"] )

    def lowerCAmelCase_ ( self ) -> int:
        # Even if the offsets are wrong, we necessarily output correct string
        # parts.
        trie = Trie()
        parts = trie.cut_text("ABC" ,[0, 0, 2, 1, 2, 3] )
        self.assertEqual(parts ,["AB", "C"] )
| 57
| 0
|
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _snake_case ( unittest.TestCase ):
    """Slow integration test for FlaxStableDiffusionInpaintPipeline.

    The obfuscated original bound every value to a throwaway `snake_case__`
    local while later lines read the real names (init_image, pipeline, ...);
    the bindings are restored here from those uses.
    """

    def lowerCAmelCase_ ( self ) -> int:
        # Acts as unittest's tearDown: force a GC pass so device memory tied to
        # Python objects can be reclaimed between tests.
        # NOTE(review): must be named `tearDown` for unittest to call it.
        super().tearDown()
        gc.collect()

    def lowerCAmelCase_ ( self ) -> List[str]:
        # End-to-end inpainting run; a 3x3 slice of the output is compared
        # against reference values.
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png" )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
        model_id = "xvjiarui/stable-diffusion-2-inpainting"
        # Flax pipelines return (pipeline, params) from from_pretrained.
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id ,safety_checker=None )
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        prng_seed = jax.random.PRNGKey(0 )
        num_inference_steps = 50
        # One copy of each input per device.
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(prompt ,init_image ,mask_image )
        # shard inputs and rng
        params = replicate(params )
        prng_seed = jax.random.split(prng_seed ,jax.device_count() )
        prompt_ids = shard(prompt_ids )
        processed_masked_images = shard(processed_masked_images )
        processed_masks = shard(processed_masks )
        output = pipeline(
            prompt_ids ,processed_masks ,processed_masked_images ,params ,prng_seed ,num_inference_steps ,jit=True )
        images = output.images.reshape(num_samples ,512 ,512 ,3 )
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] )
        print(f'output_slice: {output_slice}' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 712
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
# Batch-size constants; presumably the train micro-batch size (16) and the
# eval batch size (32) as in the Accelerate examples — TODO confirm; they are
# not referenced by name in the visible code below.
__UpperCAmelCase : Optional[Any] = 1_6
__UpperCAmelCase : Optional[int] = 3_2
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name_or_path: str = "bert-base-cased"):
    """Build train/eval DataLoaders for GLUE MRPC, tokenized for the given model.

    Renamed from the obfuscated `lowercase_` to match the call site
    (`get_dataloaders(...)` in the training function); the original also
    declared three parameters all named `__snake_case` (a SyntaxError) and
    referenced an unbound `datasets` name.

    Args:
        accelerator: Used to pick the padding strategy (TPU pads to max_length).
        batch_size: Per-device batch size for both dataloaders.
        model_name_or_path: Checkpoint whose tokenizer is used.

    Returns:
        (train_dataloader, eval_dataloader)
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=1_28, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    """Run one evaluation pass and return the accuracy.

    Renamed from the obfuscated `lowercase_` to match the call sites
    (`evaluation_loop(...)` in the training function). Parameter order is
    reconstructed from the body's uses — confirm against callers.

    Deduplicates the last gathered batch in distributed settings so each
    sample is counted exactly once.
    """
    model.eval()
    samples_seen = 0  # samples already counted; used to trim the final batch
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"]))  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(predictions=predictions, references=references)
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
    """Train BERT on MRPC with Accelerate/DeepSpeed, checkpointing every epoch.

    Renamed from the obfuscated `lowercase_` to match the call site in the
    entry point; all local bindings are restored from their later uses.

    When ``args.resume_from_checkpoint`` is set, the state is restored from a
    folder named ``epoch_<n>``, the restored accuracy / learning rates are
    verified against the ``state_<n>.json`` written at save time, and the
    function returns without further training.

    Args:
        config: dict with "lr", "num_epochs", "seed" and "batch_size".
        args: parsed CLI namespace (model_name_or_path, output_dir,
            resume_from_checkpoint, partial_train_epoch, num_epochs).
    """
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name_or_path = args.model_name_or_path
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)
    # Instantiate optimizer — a Dummy placeholder when DeepSpeed supplies its own.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler — a Dummy placeholder when DeepSpeed provides one.
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps, )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs
    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch
    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        # The checkpoint folder name is "epoch_<n>"; extract the leading digits.
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, F'state_{starting_epoch-1}.json'), "r") as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return
    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        # Save a checkpoint plus a JSON snapshot of metrics for resume checks.
        output_dir = F'epoch_{epoch}'
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(F'epoch {epoch}:', state)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, F'state_{epoch}.json'), "w") as f:
                json.dump(state, f)
def main():
    """Parse CLI arguments and launch training.

    Renamed from the obfuscated `lowercase_` to match the `main()` call in the
    `__main__` guard; the obfuscated `type=__snake_case` / `default=__snake_case`
    arguments are restored to their evident values (str/int types, None
    defaults, required=False).
    """
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path", type=str, default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.", required=False, )
    parser.add_argument(
        "--output_dir", type=str, default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", )
    parser.add_argument(
        "--resume_from_checkpoint", type=str, default=None,
        help="If the training should continue from a checkpoint folder.", )
    parser.add_argument(
        "--partial_train_epoch", type=int, default=None,
        help="If passed, the training will stop after this number of epochs.", )
    parser.add_argument(
        "--num_epochs", type=int, default=2, help="Number of train epochs.", )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 57
| 0
|
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """Count how many ordered rolls of `dice_number` dice with `sides_number`
    faces produce each possible total.

    Renamed from the obfuscated `lowercase_`: the caller invokes it as
    ``total_frequency_distribution(sides_number=..., dice_number=...)``, which
    also fixes the parameter names. The original's unbound `A__` references are
    restored from the surrounding assignments.

    Returns:
        A list indexed by total (0..sides_number*dice_number); index t holds
        the number of ordered rolls summing to t.
    """
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    faces_numbers = range(min_face_number, max_face_number + 1)
    # Enumerate every ordered outcome; product() yields dice_number-tuples.
    for dice_numbers in product(faces_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1
    return totals_frequencies
def solution() -> float:
    """Project Euler 205: probability that Peter (nine 4-sided dice) rolls a
    strictly higher total than Colin (six 6-sided dice), rounded to 7 digits.

    Renamed from the obfuscated `lowercase_` to match the `solution()` call in
    the `__main__` guard; the unbound `A__` references are restored from the
    surrounding assignments.
    """
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6, dice_number=6)
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    # For each total Peter can roll, count Colin's strictly smaller totals.
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total])
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)
    return rounded_peter_win_probability
if __name__ == "__main__":
print(F'''{solution() = }''')
| 713
|
from __future__ import annotations
class _snake_case:
    """A binary-tree node holding `data` and optional left/right children.

    The obfuscated original assigned the payload and children to throwaway
    locals, so `tree.data` / `tree.left` / `tree.right` — used by the traversal
    helpers below — never existed; they are instance attributes here.
    """

    def __init__(self, data) -> None:
        self.data = data
        # Children start empty; annotations are lazy under
        # `from __future__ import annotations`.
        self.left: _snake_case | None = None
        self.right: _snake_case | None = None
def display(tree: Node | None) -> None:  # In Order traversal of the tree
    """Print the tree's values in-order, one per line.

    Renamed from the obfuscated `lowercase_`: the body recurses via
    `display(...)` and main() calls it by that name.
    """
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)
def depth_of_tree(tree: Node | None) -> int:
    """Return the number of nodes on the longest root-to-leaf path (0 if empty).

    Renamed from the obfuscated `lowercase_`: the body recurses via
    `depth_of_tree(...)` and main() calls it by that name.
    """
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0
def is_full_binary_tree(tree: Node) -> bool:
    """True iff every node has either zero or two children.

    Renamed from the obfuscated `lowercase_`: the body recurses via
    `is_full_binary_tree(...)` and main() calls it by that name.
    An empty tree is considered full.
    """
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        # Exactly one child makes the tree non-full.
        return not tree.left and not tree.right
def main() -> None:  # Main function for testing.
    """Build a small sample tree and exercise the helpers above.

    Renamed from the obfuscated `lowercase_` to match the `main()` call in the
    `__main__` guard; the obfuscated assignments bound every node to a
    throwaway local, so the intended tree shape is restored here by attaching
    children as attributes.
    """
    tree = _snake_case(1)
    tree.left = _snake_case(2)
    tree.right = _snake_case(3)
    tree.left.left = _snake_case(4)
    tree.left.right = _snake_case(5)
    tree.left.right.left = _snake_case(6)
    tree.right.left = _snake_case(7)
    tree.right.left.left = _snake_case(8)
    tree.right.left.left.right = _snake_case(9)
    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)
if __name__ == "__main__":
main()
| 57
| 0
|
'''simple docstring'''
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
    "files", [
        ["full:README.md", "dataset_infos.json"],
        ["empty:README.md", "dataset_infos.json"],
        ["dataset_infos.json"],
        ["full:README.md"],
    ], )
def lowercase_(files, tmp_path_factory):
    """DatasetInfosDict.from_directory reads dataset_infos.json and/or README.md YAML.

    The obfuscated original declared two parameters both named `__snake_case`
    (a SyntaxError) and referenced unbound `lowerCamelCase__`; the parameter
    names are fixed by the parametrize id ("files") and the pytest
    `tmp_path_factory` fixture used in the body.
    """
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write("{\"default\": {\"dataset_size\": 42}}")
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
    "dataset_info", [
        DatasetInfo(),
        DatasetInfo(
            description="foo", features=Features({"a": Value("int32")}), builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train"}], download_size=42, ),
    ], )
def lowercase_(dataset_info, tmp_path):
    """A DatasetInfo written to a directory round-trips through from_directory.

    Parameter names restored from the parametrize id ("dataset_info") and the
    pytest `tmp_path` fixture; the obfuscated duplicate `__snake_case`
    parameters and unbound `lowerCamelCase__` references are fixed.
    """
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))
def lowercase_():
    """A fully-populated DatasetInfo round-trips through its YAML dict.

    The obfuscated original bound intermediates to throwaway locals while the
    assertions read `dataset_info_yaml_dict` etc.; the bindings are restored.
    """
    dataset_info = DatasetInfo(
        description="foo", citation="bar", homepage="https://foo.bar", license="CC0", features=Features({"a": Value("int32")}), post_processed={}, supervised_keys=(), task_templates=[], builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train", "num_examples": 42}], download_checksums={}, download_size=13_37, post_processing_size=4_42, dataset_size=12_34, size_in_bytes=13_37 + 4_42 + 12_34, )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        # YAML-representable values only.
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def lowercase_():
    """An empty DatasetInfo serializes to an empty YAML dict.

    The obfuscated original bound the DatasetInfo to a throwaway local while
    the next line read `dataset_info`; the binding is restored.
    """
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
    "dataset_infos_dict", [
        DatasetInfosDict(),
        DatasetInfosDict({"default": DatasetInfo()}),
        DatasetInfosDict({"my_config_name": DatasetInfo()}),
        DatasetInfosDict(
            {
                "default": DatasetInfo(
                    description="foo", features=Features({"a": Value("int32")}), builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train"}], download_size=42, )
            }),
        DatasetInfosDict(
            {
                "v1": DatasetInfo(dataset_size=42),
                "v2": DatasetInfo(dataset_size=13_37),
            }),
    ], )
def lowercase_(dataset_infos_dict, tmp_path):
    """A DatasetInfosDict written to a directory round-trips via README.md YAML.

    Parameter names restored from the parametrize id and the pytest `tmp_path`
    fixture; the loop body's broken `snake_case__` bindings are restored to
    the attribute/key writes the comparison below relies on.
    """
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
| 714
|
import os

# Prefer the package-relative import; fall back to the flat-script layout when
# the file is executed directly from its directory.
try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

# The obfuscated original bound every result to `__UpperCAmelCase` while the
# following statements read the real names (filepaths, upper_files, ...);
# those grounded names are restored here.
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

# Files whose names contain uppercase characters.
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(F'''{len(upper_files)} files contain uppercase characters:''')
    print("\n".join(upper_files) + "\n")

# Files whose names contain spaces.
space_files = [file for file in filepaths if " " in file]
if space_files:
    print(F'''{len(space_files)} files contain space characters:''')
    print("\n".join(space_files) + "\n")

# Files whose names contain hyphens.
hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(F'''{len(hyphen_files)} files contain hyphen characters:''')
    print("\n".join(hyphen_files) + "\n")

# Files not nested inside any directory.
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(F'''{len(nodir_files)} files are not in a directory:''')
    print("\n".join(nodir_files) + "\n")

# Non-zero total exits with the offending-file count as the status code.
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
| 57
| 0
|
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
# Whole-word English articles, stripped during SQuAD answer normalization.
# Renamed from the obfuscated `__UpperCAmelCase`: the code below reads
# `ARTICLES_REGEX` and `OPTS` by these names.
ARTICLES_REGEX = re.compile(R"\b(a|an|the)\b", re.UNICODE)
# Parsed CLI options; assigned by the script entry point before evaluation.
OPTS = None
def lowercase_():
    """Parse CLI arguments for the SQuAD 2.0 evaluation script.

    The obfuscated `type=a__` / `default=a__` values are restored: the
    no-answer threshold is a float defaulting to 1.0 (per its own help text)
    and the image directory defaults to None. Prints help and exits with
    status 1 when invoked with no arguments.
    """
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout).")
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer.")
    parser.add_argument(
        "--na-prob-thresh", "-t", type=float, default=1.0, help="Predict \"\" if no-answer probability exceeds this (default = 1.0).", )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory.")
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def lowercase_(dataset):
    """Map each question id to whether it has at least one gold answer text.

    The obfuscated original built the mapping into a throwaway local and
    returned the unbound name `qid_to_has_ans`; the binding and the per-qa
    keying (`qa["id"]`) are restored.
    """
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans
def normalize_answer(s):
    """Lowercase, strip punctuation/articles and collapse whitespace in `s`.

    Renamed from the obfuscated `lowercase_`: sibling functions call it as
    `normalize_answer(...)`. The unbound `a__` references are restored to the
    relevant parameters.
    """
    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
    """Whitespace tokens of the normalized answer; [] for empty/None input.

    Renamed from the obfuscated `lowercase_`: the F1 computation calls it as
    `get_tokens(...)`.
    """
    if not s:
        return []
    return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
    """Return 1 if the normalized answers match exactly, else 0.

    Renamed from the obfuscated `lowercase_`: the raw-score loop calls it as
    `compute_exact(...)`; the duplicate `__snake_case` parameters are renamed.
    """
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_fa(a_gold, a_pred):
    """Token-level F1 between a gold and a predicted answer string.

    Renamed from the obfuscated `lowercase_`: the raw-score loop calls it as
    `compute_fa(...)`; the broken local bindings are restored.
    """
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def lowercase_(dataset, preds):
    """Compute per-question exact-match and F1 scores against gold answers.

    The duplicate `__snake_case` parameters and broken local bindings of the
    obfuscated original are restored from their uses.

    Returns:
        (exact_scores, fa_scores): dicts keyed by question id.
    """
    exact_scores = {}
    fa_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(F'Missing prediction for {qid}')
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                fa_scores[qid] = max(compute_fa(a, a_pred) for a in gold_answers)
    return exact_scores, fa_scores
def lowercase_(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    """Zero out scores for questions the model predicts as unanswerable.

    When the no-answer probability exceeds the threshold, the score becomes
    1.0 if the question truly has no answer, else 0.0. The duplicate
    `__snake_case` parameters and broken bindings are restored.
    """
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def lowercase_(exact_scores, fa_scores, qid_list=None):
    """Aggregate per-question scores into overall exact/F1 percentages.

    When `qid_list` is given, only those questions are averaged. The
    duplicate `__snake_case` parameters and the unbound `a__` lengths of the
    obfuscated original are restored.
    """
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 1_0_0.0 * sum(exact_scores.values()) / total),
                ("f1", 1_0_0.0 * sum(fa_scores.values()) / total),
                ("total", total),
            ])
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 1_0_0.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 1_0_0.0 * sum(fa_scores[k] for k in qid_list) / total),
                ("total", total),
            ])
def merge_eval(main_eval, new_eval, prefix):
    """Copy every metric from `new_eval` into `main_eval` as '<prefix>_<key>'.

    Renamed from the obfuscated `lowercase_`: the PR-analysis helper calls it
    as `merge_eval(...)`. The obfuscated body assigned to a throwaway local;
    the prefixed-key write is restored.
    """
    for k in new_eval:
        main_eval["%s_%s" % (prefix, k)] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    """Save a precision-recall step plot to `out_image`.

    Renamed from the obfuscated `lowercase_`: make_precision_recall_eval calls
    it as `plot_pr_curve(...)`. NOTE(review): `plt` is not imported in this
    module chunk — presumably matplotlib.pyplot is imported lazily by the
    entry point before plotting; confirm.
    """
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.0_5])
    plt.ylim([0.0, 1.0_5])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    """Compute average precision by sweeping the no-answer probability threshold.

    Renamed from the obfuscated `lowercase_`: the PR-analysis helper calls it
    as `make_precision_recall_eval(...)`. Questions are visited in increasing
    no-answer probability; a PR point is emitted wherever a threshold can be
    placed. Optionally saves the curve via plot_pr_curve.
    """
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 1_0_0.0 * avg_prec}
def lowercase_(main_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, out_image_dir):
    """Run the precision-recall analysis for exact, F1 and oracle scores,
    saving curve images and merging the AP metrics into `main_eval`.

    The duplicate `__snake_case` parameters and the unbound `a__` references
    of the obfuscated original are restored from the helper call sites.
    """
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_exact.png"), title="Precision-Recall curve for Exact Match score", )
    pr_fa = make_precision_recall_eval(
        fa_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_f1.png"), title="Precision-Recall curve for F1 score", )
    # Oracle: score 1 exactly for the answerable questions.
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_oracle.png"), title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)", )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_fa, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")
def lowercase_ ( __snake_case : Dict , __snake_case : List[Any] , __snake_case : List[str] , __snake_case : Any ) -> Optional[int]:
    # NOTE(review): mangled — duplicate parameter names and unbound `qid_list`,
    # `na_probs`, `x`, `weights`, `output_dir`, `name`. Intent: save a histogram of
    # no-answer probabilities for the given qid subset to
    # `<output_dir>/na_prob_hist_<name>.png`.
    '''simple docstring'''
    if not qid_list:
        return
    snake_case__ :Any = [na_probs[k] for k in qid_list]
    snake_case__ :int = np.ones_like(a__ ) / float(len(a__ ) )
    plt.hist(a__ , weights=a__ , bins=20 , range=(0.0, 1.0) )
    plt.xlabel("Model probability of no-answer" )
    plt.ylabel("Proportion of dataset" )
    plt.title(F'Histogram of no-answer probability: {name}' )
    plt.savefig(os.path.join(a__ , F'na_prob_hist_{name}.png' ) )
    plt.clf()
def lowercase_ ( __snake_case : List[str] , __snake_case : Dict , __snake_case : Dict , __snake_case : Optional[Any] ) -> List[str]:
    # NOTE(review): mangled — duplicate parameter names; body reads unbound
    # `qid_to_has_ans`, `num_no_ans`, `cur_score`, `preds`, `scores`, `na_probs`,
    # `diff`, `best_score`, `best_thresh`. Intent (SQuAD `find_best_thresh`): sweep
    # qids by ascending no-answer probability and return the best achievable score
    # (scaled to 100) together with the threshold that achieves it.
    '''simple docstring'''
    snake_case__ :Union[str, Any] = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
    snake_case__ :Optional[Any] = num_no_ans
    snake_case__ :Tuple = cur_score
    snake_case__ :int = 0.0
    snake_case__ :List[str] = sorted(a__ , key=lambda __snake_case : na_probs[k] )
    for i, qid in enumerate(a__ ):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            snake_case__ :List[str] = scores[qid]
        else:
            if preds[qid]:
                snake_case__ :Tuple = -1
            else:
                snake_case__ :int = 0
        cur_score += diff
        if cur_score > best_score:
            snake_case__ :Any = cur_score
            snake_case__ :str = na_probs[qid]
    return 1_0_0.0 * best_score / len(a__ ), best_thresh
def lowercase_ ( __snake_case : List[str] , __snake_case : List[Any] , __snake_case : List[str] , __snake_case : Optional[Any] , __snake_case : str , __snake_case : Any ) -> Optional[int]:
    # NOTE(review): mangled — duplicate parameter names; the four results should be
    # stored into the main-eval dict under "best_exact"/"best_exact_thresh"/
    # "best_f1"/"best_f1_thresh", but every assignment targets `snake_case__`.
    '''simple docstring'''
    snake_case__ , snake_case__ :Optional[Any] = find_best_thresh(a__ , a__ , a__ , a__ )
    snake_case__ , snake_case__ :str = find_best_thresh(a__ , a__ , a__ , a__ )
    snake_case__ :str = best_exact
    snake_case__ :Optional[int] = exact_thresh
    snake_case__ :Tuple = best_fa
    snake_case__ :Dict = fa_thresh
def lowercase_ ( ) -> List[Any]:
    # NOTE(review): mangled SQuAD-eval `main()` — every local is assigned to
    # `snake_case__` while later lines read the intended names (`dataset_json`,
    # `preds`, `na_probs`, `qid_to_has_ans`, `has_ans_qids`, `no_ans_qids`), and all
    # call arguments were replaced with the undefined `a__`. Restore the real local
    # names to make this runnable.
    '''simple docstring'''
    with open(OPTS.data_file ) as f:
        snake_case__ :int = json.load(a__ )
    snake_case__ :Any = dataset_json["data"]
    with open(OPTS.pred_file ) as f:
        snake_case__ :Union[str, Any] = json.load(a__ )
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file ) as f:
            snake_case__ :Any = json.load(a__ )
    else:
        snake_case__ :int = {k: 0.0 for k in preds}
    snake_case__ :Tuple = make_qid_to_has_ans(a__ ) # maps qid to True/False
    snake_case__ :Optional[int] = [k for k, v in qid_to_has_ans.items() if v]
    snake_case__ :Dict = [k for k, v in qid_to_has_ans.items() if not v]
    snake_case__ , snake_case__ :int = get_raw_scores(a__ , a__ )
    snake_case__ :List[str] = apply_no_ans_threshold(a__ , a__ , a__ , OPTS.na_prob_thresh )
    snake_case__ :Union[str, Any] = apply_no_ans_threshold(a__ , a__ , a__ , OPTS.na_prob_thresh )
    snake_case__ :Any = make_eval_dict(a__ , a__ )
    if has_ans_qids:
        snake_case__ :List[Any] = make_eval_dict(a__ , a__ , qid_list=a__ )
        merge_eval(a__ , a__ , "HasAns" )
    if no_ans_qids:
        snake_case__ :Optional[int] = make_eval_dict(a__ , a__ , qid_list=a__ )
        merge_eval(a__ , a__ , "NoAns" )
    if OPTS.na_prob_file:
        find_all_best_thresh(a__ , a__ , a__ , a__ , a__ , a__ )
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(a__ , a__ , a__ , a__ , a__ , OPTS.out_image_dir )
        histogram_na_prob(a__ , a__ , OPTS.out_image_dir , "hasAns" )
        histogram_na_prob(a__ , a__ , OPTS.out_image_dir , "noAns" )
    if OPTS.out_file:
        with open(OPTS.out_file , "w" ) as f:
            json.dump(a__ , a__ )
    else:
        print(json.dumps(a__ , indent=2 ) )
if __name__ == "__main__":
    # NOTE(review): the parsed options are stored in `__UpperCAmelCase`, yet the rest of
    # the file reads `OPTS` — presumably this assignment should bind `OPTS`; verify.
    __UpperCAmelCase : Optional[int] = parse_args()
    if OPTS.out_image_dir:
        # Use a non-interactive matplotlib backend so plots can be saved headlessly.
        import matplotlib
        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
| 715
|
def lowercase_ ( __snake_case : Tuple , __snake_case : Optional[int] ) -> List[Any]:
    # NOTE(review): mangled — both parameters share one name (SyntaxError) and the body
    # reads unbound `table`, `inp`, `res`. Intent is S-DES `apply_table(inp, table)`:
    # build the permutation of `inp` selected by the 1-based indices in `table`.
    '''simple docstring'''
    snake_case__ :Dict = ""
    for i in table:
        res += inp[i - 1]
    return res
def lowercase_ ( __snake_case : List[str] ) -> int:
'''simple docstring'''
return data[1:] + data[0]
def lowercase_ ( __snake_case : int , __snake_case : Union[str, Any] ) -> Union[str, Any]:
    # NOTE(review): mangled — both parameters share one name (SyntaxError); the body
    # compares bit-strings `a` and `b` position-wise and builds their XOR, but `a`, `b`
    # and `res` are never bound. Restore two distinct parameters (a, b).
    '''simple docstring'''
    snake_case__ :Union[str, Any] = ""
    for i in range(len(__snake_case ) ):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res
def lowercase_ ( __snake_case : Optional[int] , __snake_case : Dict ) -> Union[str, Any]:
    # NOTE(review): mangled — duplicate parameter names; the body needs an S-box `s`
    # and 4-bit input `data`: outer bits select the row, middle bits the column.
    # `row`/`col`/`s` are unbound here.
    '''simple docstring'''
    snake_case__ :int = int("0b" + data[0] + data[-1] , 2 )
    snake_case__ :Union[str, Any] = int("0b" + data[1:3] , 2 )
    return bin(s[row][col] )[2:]
def lowercase_ ( __snake_case : Dict , __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : List[Any] , __snake_case : Optional[int] ) -> List[str]:
    # NOTE(review): mangled — five parameters share one name (SyntaxError). This is the
    # S-DES round function f(expansion, s0, s1, key, message): split the 8-bit message,
    # expand+permute the right half, XOR with the round key, run the halves through the
    # S-boxes, zero-pad each 2-bit result, apply P4 and XOR with the left half.
    # `message`/`key`/`expansion`/`pa_table` and the reread locals are unbound.
    '''simple docstring'''
    snake_case__ :Tuple = message[:4]
    snake_case__ :int = message[4:]
    snake_case__ :int = apply_table(__snake_case , __snake_case )
    snake_case__ :Union[str, Any] = xor(__snake_case , __snake_case )
    snake_case__ :Tuple = apply_sbox(__snake_case , temp[:4] ) # noqa: E741
    snake_case__ :List[str] = apply_sbox(__snake_case , temp[4:] )
    snake_case__ :int = "0" * (2 - len(__snake_case )) + l # noqa: E741
    snake_case__ :int = "0" * (2 - len(__snake_case )) + r
    snake_case__ :Optional[Any] = apply_table(l + r , __snake_case )
    snake_case__ :Tuple = xor(__snake_case , __snake_case )
    return temp + right
if __name__ == "__main__":
    # NOTE(review): this S-DES driver is mangled — every value is rebound to
    # `__UpperCAmelCase` while later lines read the intended names (key, message,
    # paa_table, pa_table, IP, IP_inv, expansion, sa/s-boxes, keya, temp, left,
    # right, CT, PT). As written it raises NameError on the first `apply_table` call.
    __UpperCAmelCase : Dict = input("Enter 10 bit key: ")
    __UpperCAmelCase : Tuple = input("Enter 8 bit message: ")
    __UpperCAmelCase : Any = [6, 3, 7, 4, 8, 5, 1_0, 9]
    __UpperCAmelCase : List[str] = [3, 5, 2, 7, 4, 1_0, 1, 9, 8, 6]
    __UpperCAmelCase : Tuple = [2, 4, 3, 1]
    __UpperCAmelCase : List[Any] = [2, 6, 3, 1, 4, 8, 5, 7]
    __UpperCAmelCase : Optional[Any] = [4, 1, 3, 5, 7, 2, 8, 6]
    __UpperCAmelCase : Optional[int] = [4, 1, 2, 3, 2, 3, 4, 1]
    __UpperCAmelCase : List[Any] = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    __UpperCAmelCase : Union[str, Any] = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
    # key generation
    __UpperCAmelCase : int = apply_table(key, paa_table)
    __UpperCAmelCase : Dict = temp[:5]
    __UpperCAmelCase : Optional[int] = temp[5:]
    __UpperCAmelCase : Optional[int] = left_shift(left)
    __UpperCAmelCase : Union[str, Any] = left_shift(right)
    __UpperCAmelCase : int = apply_table(left + right, pa_table)
    __UpperCAmelCase : Tuple = left_shift(left)
    __UpperCAmelCase : Union[str, Any] = left_shift(right)
    __UpperCAmelCase : Dict = left_shift(left)
    __UpperCAmelCase : Optional[Any] = left_shift(right)
    __UpperCAmelCase : Optional[int] = apply_table(left + right, pa_table)
    # encryption
    __UpperCAmelCase : Tuple = apply_table(message, IP)
    __UpperCAmelCase : Tuple = function(expansion, sa, sa, keya, temp)
    __UpperCAmelCase : List[Any] = temp[4:] + temp[:4]
    __UpperCAmelCase : int = function(expansion, sa, sa, keya, temp)
    __UpperCAmelCase : Union[str, Any] = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)
    # decryption
    __UpperCAmelCase : List[Any] = apply_table(CT, IP)
    __UpperCAmelCase : List[Any] = function(expansion, sa, sa, keya, temp)
    __UpperCAmelCase : int = temp[4:] + temp[:4]
    __UpperCAmelCase : Union[str, Any] = function(expansion, sa, sa, keya, temp)
    __UpperCAmelCase : Union[str, Any] = apply_table(temp, IP_inv)
    print("Plain text after decypting is:", PT)
| 57
| 0
|
import torch
from transformers import AutoModel
class _snake_case ( torch.nn.Module ):
    # NOTE(review): FSNER (few-shot NER) scorer, but identifiers are mangled throughout:
    # method parameters are named `UpperCamelCase` while bodies reference the undefined
    # `lowerCAmelCase_`, and every attribute/local is assigned to `snake_case__` yet read
    # back under its intended name (self.bert/self.cos/self.softmax, q, S, s, p_start,
    # p_end, ...). The structure below documents the intended contract; the code itself
    # cannot run until the names are restored.
    def __init__( self ,UpperCamelCase="sayef/fsner-bert-base-uncased" ) -> str:
        # Loads the backbone encoder plus a cosine-similarity / softmax head.
        super(lowerCAmelCase_ ,self ).__init__()
        snake_case__ :str = AutoModel.from_pretrained(lowerCAmelCase_ ,return_dict=lowerCAmelCase_ )
        snake_case__ :int = torch.nn.CosineSimilarity(3 ,1E-08 )
        snake_case__ :Union[str, Any] = torch.nn.Softmax(dim=1 )
    def lowerCAmelCase_ ( self ,**UpperCamelCase ) -> Any:
        # Encode: run the backbone and return the last hidden state.
        return self.bert(**lowerCAmelCase_ ).last_hidden_state
    def lowerCAmelCase_ ( self ,UpperCamelCase ) -> int:
        # Sum token embeddings along dim 2 (keepdim flag is a mangled reference).
        return token_embeddings.sum(2 ,keepdim=lowerCAmelCase_ )
    def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=1 ) -> Union[str, Any]:
        # Temperature-scaled softmax over cosine similarities.
        return self.softmax(T * self.cos(lowerCAmelCase_ ,lowerCAmelCase_ ) )
    def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> Optional[int]:
        # Forward pass: score start/end token positions of each query against the
        # support examples; returns per-position start and end probability stacks.
        snake_case__ :List[Any] = W_supports["sizes"].tolist()
        snake_case__ :Union[str, Any] = W_supports["start_token_id"].item()
        snake_case__ :List[str] = W_supports["end_token_id"].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        snake_case__ :Dict = self.BERT(**lowerCAmelCase_ )
        snake_case__ :Tuple = self.BERT(**lowerCAmelCase_ )
        snake_case__ :Any = None
        snake_case__ :Tuple = None
        snake_case__ :List[str] = W_supports["input_ids"] == start_token_id
        snake_case__ :Dict = W_supports["input_ids"] == end_token_id
        for i, size in enumerate(lowerCAmelCase_ ):
            if i == 0:
                snake_case__ :Union[str, Any] = 0
            else:
                snake_case__ :int = support_sizes[i - 1]
            snake_case__ :List[str] = S[s : s + size][start_token_masks[s : s + size]]
            snake_case__ :str = S[s : s + size][end_token_masks[s : s + size]]
            snake_case__ :List[str] = torch.matmul(q[i] ,s_start.T ).sum(1 ).softmax(0 )
            snake_case__ :int = torch.matmul(q[i] ,s_end.T ).sum(1 ).softmax(0 )
            if p_starts is not None:
                snake_case__ :Tuple = torch.vstack((p_starts, p_start) )
                snake_case__ :Tuple = torch.vstack((p_ends, p_end) )
            else:
                snake_case__ :Optional[Any] = p_start
                snake_case__ :Any = p_end
        return p_starts, p_ends
| 716
|
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _snake_case ( _A , _A , _A ):
    # NOTE(review): a T5-style note/text encoder (diffusers spectrogram pipeline), but
    # mangled: `__init__` declares ten parameters all named `UpperCamelCase` (a
    # SyntaxError) and locals assigned to `snake_case__` are read back as
    # self.token_embedder / self.position_encoding / self.encoders / self.layer_norm /
    # self.dropout_pre / self.dropout_post. Restore the real names before use.
    @register_to_config
    def __init__( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = False ,) -> int:
        super().__init__()
        snake_case__ :Union[str, Any] = nn.Embedding(UpperCamelCase ,UpperCamelCase )
        snake_case__ :int = nn.Embedding(UpperCamelCase ,UpperCamelCase )
        snake_case__ :Any = False
        snake_case__ :List[Any] = nn.Dropout(p=UpperCamelCase )
        snake_case__ :Tuple = TaConfig(
            vocab_size=UpperCamelCase ,d_model=UpperCamelCase ,num_heads=UpperCamelCase ,d_kv=UpperCamelCase ,d_ff=UpperCamelCase ,dropout_rate=UpperCamelCase ,feed_forward_proj=UpperCamelCase ,is_decoder=UpperCamelCase ,is_encoder_decoder=UpperCamelCase ,)
        snake_case__ :List[str] = nn.ModuleList()
        for lyr_num in range(UpperCamelCase ):
            snake_case__ :List[Any] = TaBlock(UpperCamelCase )
            self.encoders.append(UpperCamelCase )
        snake_case__ :Optional[Any] = TaLayerNorm(UpperCamelCase )
        snake_case__ :Any = nn.Dropout(p=UpperCamelCase )
    def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> int:
        # Embed tokens, add learned position encodings, run the T5 blocks with an
        # extended attention mask, then layer-norm + dropout. Returns (hidden, mask).
        snake_case__ :str = self.token_embedder(UpperCamelCase )
        snake_case__ :int = encoder_input_tokens.shape[1]
        snake_case__ :List[Any] = torch.arange(UpperCamelCase ,device=encoder_input_tokens.device )
        x += self.position_encoding(UpperCamelCase )
        snake_case__ :Optional[int] = self.dropout_pre(UpperCamelCase )
        # inverted the attention mask
        snake_case__ :Optional[Any] = encoder_input_tokens.size()
        snake_case__ :Dict = self.get_extended_attention_mask(UpperCamelCase ,UpperCamelCase )
        for lyr in self.encoders:
            snake_case__ :str = lyr(UpperCamelCase ,UpperCamelCase )[0]
        snake_case__ :List[Any] = self.layer_norm(UpperCamelCase )
        return self.dropout_post(UpperCamelCase ), encoder_inputs_mask
| 57
| 0
|
def lowercase_ ( __snake_case : int ) -> int:
    """Return the multiplicative persistence of ``__snake_case``.

    The multiplicative persistence is the number of times the digits of a
    number must be multiplied together before a single digit remains
    (e.g. 39 -> 27 -> 14 -> 4, so the persistence of 39 is 3).

    Args:
        __snake_case: a non-negative integer.

    Returns:
        The number of digit-product steps until one digit is left.

    Raises:
        ValueError: if the input is not an ``int`` or is negative.
    """
    # Bug fixes vs. the original:
    #   * `isinstance(__snake_case, __snake_case)` raised TypeError for any int input;
    #     the intended check is against `int`.
    #   * the negativity test read an undefined name `num` instead of the parameter.
    #   * the digit list was built with `int(__snake_case)` (the whole number) instead
    #     of converting each digit character.
    if not isinstance(__snake_case , int ):
        raise ValueError("multiplicative_persistence() only accepts integral values" )
    if __snake_case < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values" )
    steps = 0
    num_string = str(__snake_case )
    while len(num_string ) != 1:
        digits = [int(ch ) for ch in num_string]
        total = 1
        for digit in digits:
            total *= digit
        num_string = str(total )
        steps += 1
    return steps
def lowercase_ ( __snake_case : int ) -> int:
    """Return the additive persistence of ``__snake_case``.

    The additive persistence is the number of times the digits of a number
    must be summed before a single digit remains
    (e.g. 199 -> 19 -> 10 -> 1, so the persistence of 199 is 3).

    Args:
        __snake_case: a non-negative integer.

    Returns:
        The number of digit-sum steps until one digit is left.

    Raises:
        ValueError: if the input is not an ``int`` or is negative.
    """
    # Bug fixes vs. the original: the type check compared the value against itself
    # (TypeError), the negativity test read an undefined `num`, and the digit list
    # converted the whole number instead of each digit character.
    if not isinstance(__snake_case , int ):
        raise ValueError("additive_persistence() only accepts integral values" )
    if __snake_case < 0:
        raise ValueError("additive_persistence() does not accept negative values" )
    steps = 0
    num_string = str(__snake_case )
    while len(num_string ) != 1:
        digits = [int(ch ) for ch in num_string]
        total = 0
        for digit in digits:
            total += digit
        num_string = str(total )
        steps += 1
    return steps
if __name__ == "__main__":
    # Run any doctests embedded in this module's docstrings when executed directly.
    import doctest
    doctest.testmod()
| 717
|
# NOTE(review): DFS topological sort, mangled — the adjacency dict and vertex list are
# bound to `__UpperCAmelCase` but read as `edges`/`vertices`, the function's three
# parameters share one name (SyntaxError) and the body reads unbound `start`/`visited`/
# `sort`; the __main__ guard calls `topological_sort`, which is not defined here.
__UpperCAmelCase : int = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
__UpperCAmelCase : List[str] = ["a", "b", "c", "d", "e"]
def lowercase_ ( __snake_case : Optional[Any] , __snake_case : List[Any] , __snake_case : Tuple ) -> Optional[int]:
    '''simple docstring'''
    snake_case__ :List[Any] = start
    # add current to visited
    visited.append(__snake_case )
    snake_case__ :List[str] = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            snake_case__ :Any = topological_sort(__snake_case , __snake_case , __snake_case )
    # if all neighbors visited add current to sort
    sort.append(__snake_case )
    # if all vertices haven't been visited select a new one to visit
    if len(__snake_case ) != len(__snake_case ):
        for vertice in vertices:
            if vertice not in visited:
                snake_case__ :Any = topological_sort(__snake_case , __snake_case , __snake_case )
    # return sort
    return sort
if __name__ == "__main__":
    __UpperCAmelCase : Tuple = topological_sort("a", [], [])
    print(sort)
| 57
| 0
|
from __future__ import annotations
def lowercase_ ( __snake_case : List[str] , __snake_case : int ) -> bool:
    # NOTE(review): Knuth-Morris-Pratt search, mangled — the two parameters
    # (pattern, text) share one name (SyntaxError) and the body reads unbound
    # `pattern`, `text` and `failure` (the failure array is assigned to
    # `snake_case__` on the first line and inside the elif branch).
    '''simple docstring'''
    snake_case__ :Union[str, Any] = get_failure_array(__snake_case )
    # 2) Step through text searching for pattern
    snake_case__ :Union[str, Any] = 0, 0 # index into text, pattern
    while i < len(__snake_case ):
        if pattern[j] == text[i]:
            if j == (len(__snake_case ) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            snake_case__ :str = failure[j - 1]
            continue
        i += 1
    return False
def lowercase_ ( __snake_case : str ) -> list[int]:
    """Compute the KMP failure (prefix) function for ``__snake_case``.

    ``failure[j]`` is the length of the longest proper prefix of the pattern
    that is also a suffix of ``pattern[: j + 1]``.

    Args:
        __snake_case: the pattern string (any indexable sequence works).

    Returns:
        A list of ints, one entry per character of the pattern.
    """
    # Bug fixes vs. the original: the body referenced an undefined name `pattern`,
    # the fallback branch assigned `failure[i - 1]` to a throwaway variable instead
    # of `i`, and the whole pattern (not the running prefix length) was appended.
    pattern = __snake_case
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern ):
        if pattern[i] == pattern[j]:
            # Extend the current matched prefix.
            i += 1
        elif i > 0:
            # Fall back to the longest shorter border and retry this position.
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i )
    return failure
if __name__ == "__main__":
    # NOTE(review): these self-tests call `kmp`/`get_failure_array` and read
    # `pattern`/`texta`/`text`, but the assignments above bind `__UpperCAmelCase`
    # and the functions in this chunk are named `lowercase_` — mangled wiring.
    # Test 1)
    __UpperCAmelCase : Any = """abc1abc12"""
    __UpperCAmelCase : int = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
    __UpperCAmelCase : Dict = """alskfjaldsk23adsfabcabc"""
    assert kmp(pattern, texta) and not kmp(pattern, texta)
    # Test 2)
    __UpperCAmelCase : str = """ABABX"""
    __UpperCAmelCase : List[str] = """ABABZABABYABABX"""
    assert kmp(pattern, text)
    # Test 3)
    __UpperCAmelCase : Dict = """AAAB"""
    __UpperCAmelCase : int = """ABAAAAAB"""
    assert kmp(pattern, text)
    # Test 4)
    __UpperCAmelCase : Optional[int] = """abcdabcy"""
    __UpperCAmelCase : str = """abcxabcdabxabcdabcdabcy"""
    assert kmp(pattern, text)
    # Test 5)
    __UpperCAmelCase : Tuple = """aabaabaaa"""
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 718
|
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _snake_case ( unittest.TestCase ):
    # NOTE(review): slow integration tests for the Flax Stable Diffusion ControlNet
    # pipeline (canny and openpose conditioning). Mangled: `from_pt=UpperCamelCase`,
    # `jit=UpperCamelCase` etc. reference an undefined name inside no-arg methods, and
    # locals assigned to `snake_case__` are read back as controlnet/pipe/prompts/
    # prompt_ids/rng/params/processed_image. Requires network + accelerator at runtime.
    def lowerCAmelCase_ ( self ) -> int:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def lowerCAmelCase_ ( self ) -> str:
        # Canny-edge conditioning: generate and compare a pixel slice to a reference.
        snake_case__ , snake_case__ :Tuple = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny" ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa )
        snake_case__ , snake_case__ :Any = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5" ,controlnet=UpperCamelCase ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa )
        snake_case__ :List[str] = controlnet_params
        snake_case__ :Union[str, Any] = "bird"
        snake_case__ :Optional[int] = jax.device_count()
        snake_case__ :Tuple = pipe.prepare_text_inputs([prompts] * num_samples )
        snake_case__ :Union[str, Any] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" )
        snake_case__ :str = pipe.prepare_image_inputs([canny_image] * num_samples )
        snake_case__ :List[str] = jax.random.PRNGKey(0 )
        snake_case__ :str = jax.random.split(UpperCamelCase ,jax.device_count() )
        snake_case__ :int = replicate(UpperCamelCase )
        snake_case__ :Any = shard(UpperCamelCase )
        snake_case__ :Any = shard(UpperCamelCase )
        snake_case__ :str = pipe(
            prompt_ids=UpperCamelCase ,image=UpperCamelCase ,params=UpperCamelCase ,prng_seed=UpperCamelCase ,num_inference_steps=50 ,jit=UpperCamelCase ,).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        snake_case__ :List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        snake_case__ :Any = images[0, 253:256, 253:256, -1]
        snake_case__ :Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        snake_case__ :List[Any] = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078] )
        print(f'output_slice: {output_slice}' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
    def lowerCAmelCase_ ( self ) -> Optional[int]:
        # Openpose conditioning: same structure as the canny test above.
        snake_case__ , snake_case__ :List[str] = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose" ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa )
        snake_case__ , snake_case__ :Optional[Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5" ,controlnet=UpperCamelCase ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa )
        snake_case__ :str = controlnet_params
        snake_case__ :int = "Chef in the kitchen"
        snake_case__ :List[Any] = jax.device_count()
        snake_case__ :Dict = pipe.prepare_text_inputs([prompts] * num_samples )
        snake_case__ :Any = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" )
        snake_case__ :Optional[int] = pipe.prepare_image_inputs([pose_image] * num_samples )
        snake_case__ :List[str] = jax.random.PRNGKey(0 )
        snake_case__ :Any = jax.random.split(UpperCamelCase ,jax.device_count() )
        snake_case__ :Dict = replicate(UpperCamelCase )
        snake_case__ :Tuple = shard(UpperCamelCase )
        snake_case__ :Optional[int] = shard(UpperCamelCase )
        snake_case__ :Optional[Any] = pipe(
            prompt_ids=UpperCamelCase ,image=UpperCamelCase ,params=UpperCamelCase ,prng_seed=UpperCamelCase ,num_inference_steps=50 ,jit=UpperCamelCase ,).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        snake_case__ :int = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        snake_case__ :List[str] = images[0, 253:256, 253:256, -1]
        snake_case__ :Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        snake_case__ :List[str] = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]] )
        print(f'output_slice: {output_slice}' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 57
| 0
|
from typing import List
import numpy as np
def lowercase_ ( __snake_case : dict ) -> int:
    """Return the number of shards implied by the list-valued entries of ``__snake_case``.

    Every ``list`` value in the mapping is treated as a parallel data-source list;
    all such lists must have the same length, which is the shard count.

    Args:
        __snake_case: generator keyword arguments (values that are lists are
            data-source lists; everything else is ignored).

    Returns:
        The common length of the list values, or ``1`` when there are no lists
        (or only empty ones).

    Raises:
        RuntimeError: if two list values have different lengths (ambiguous sharding).
    """
    # Bug fixes vs. the original: the body read undefined names (`gen_kwargs`, `__a`)
    # and the comprehension's filter was `isinstance(__a, __a)` instead of testing
    # each value against `list`.
    gen_kwargs = __snake_case
    lists_lengths = {key: len(value ) for key, value in gen_kwargs.items() if isinstance(value , list )}
    if len(set(lists_lengths.values() ) ) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(F'\t- key {key} has length {length}' for key, length in lists_lengths.items() )
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            ) )
    max_length = max(lists_lengths.values() , default=0 )
    return max(1 , max_length )
def lowercase_ ( num_shards : int , max_num_jobs : int ) -> list:
    """Distribute ``num_shards`` shard indices over at most ``max_num_jobs`` groups.

    Earlier groups receive one extra shard when the division is uneven, and empty
    groups are dropped.

    Args:
        num_shards: total number of shards to distribute.
        max_num_jobs: maximum number of jobs (groups) to split into.

    Returns:
        A list of ``range`` objects, one contiguous index range per non-empty group.
    """
    # Bug fixes vs. the original: both parameters shared one mangled name (a
    # SyntaxError), and the loop bound, group start and appended range all read
    # names (`num_shards`, `max_num_jobs`, `start`) that were never bound. Callers
    # elsewhere pass `num_shards=` / `max_num_jobs=` keywords, which these parameter
    # names restore.
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs ):
        # Spread the remainder over the first (num_shards % max_num_jobs) groups.
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start , start + num_shards_to_add )
        shards_indices_per_group.append(shard_indices )
    return shards_indices_per_group
def lowercase_ ( __snake_case : dict , __snake_case : int ) -> List[str]:
    # NOTE(review): mangled — both parameters (gen_kwargs, max_num_jobs) share one name
    # (SyntaxError), `__a` is undefined, and the sibling helpers it should call
    # (`_number_of_shards_in_gen_kwargs`, `_distribute_shards`) are also renamed in
    # this file. Intent: split gen_kwargs into one kwargs dict per job, slicing each
    # list value by the job's shard indices and copying scalars through.
    '''simple docstring'''
    snake_case__ :Union[str, Any] = _number_of_shards_in_gen_kwargs(__a )
    if num_shards == 1:
        return [dict(__a )]
    else:
        snake_case__ :Optional[Any] = _distribute_shards(num_shards=__a , max_num_jobs=__a )
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(__a , __a )
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(__a ) )
        ]
def lowercase_ ( __snake_case : List[dict] ) -> dict:
    """Merge a list of gen_kwargs dicts back into a single dict.

    List-valued entries are concatenated across all dicts (in order); any other
    value is taken from the first dict, on the assumption that non-list values
    are identical across shards.

    Args:
        __snake_case: a non-empty list of gen_kwargs dicts sharing the same keys.

    Returns:
        One merged gen_kwargs dict.
    """
    # Bug fix vs. the original: the body read the undefined names `gen_kwargs_list`
    # and `__a` (the latter in place of `list` in the isinstance check).
    gen_kwargs_list = __snake_case
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key] , list )
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }
def lowercase_ ( __snake_case : np.random.Generator , __snake_case : dict ) -> str:
    # NOTE(review): mangled — the two parameters (rng, gen_kwargs) share one name
    # (SyntaxError), `__a` is undefined, and `rng`/`indices_per_size`/`shuffled_kwargs`
    # are read but assigned to `snake_case__`. Intent: shuffle every list value of
    # gen_kwargs, reusing one permutation per list length so parallel lists stay
    # aligned; non-list values pass through unchanged.
    '''simple docstring'''
    snake_case__ :List[Any] = {len(__a ) for value in gen_kwargs.values() if isinstance(__a , __a )}
    snake_case__ :Tuple = {}
    for size in list_sizes:
        snake_case__ :int = list(range(__a ) )
        rng.shuffle(indices_per_size[size] )
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    snake_case__ :Dict = dict(__a )
    for key, value in shuffled_kwargs.items():
        if isinstance(__a , __a ):
            snake_case__ :Union[str, Any] = [value[i] for i in indices_per_size[len(__a )]]
    return shuffled_kwargs
| 719
|
def lowercase_ ( __snake_case : list ) -> list:
    """Sort a list of non-negative integers with bead sort (gravity sort).

    The input list is sorted **in place** and also returned, matching the
    original's behavior.

    Args:
        __snake_case: list of non-negative integers.

    Returns:
        The same list, sorted ascending.

    Raises:
        TypeError: if any element is not a non-negative integer.
    """
    # Bug fixes vs. the original: the body read an undefined name `sequence`, and the
    # validity check was `isinstance(__snake_case, __snake_case)` (TypeError) instead
    # of testing each element against `int`.
    sequence = __snake_case
    if any(not isinstance(x , int ) or x < 0 for x in sequence ):
        raise TypeError("Sequence must be list of non-negative integers" )
    # Each pass lets "beads" fall one level; len(sequence) passes suffice.
    for _ in range(len(sequence ) ):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence , sequence[1:] ) ):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
    # NOTE(review): self-tests call `bead_sort`, but the function in this chunk is
    # named `lowercase_` — mangled wiring; these asserts raise NameError as written.
    assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 57
| 0
|
import math
from collections.abc import Iterator
from itertools import takewhile
def lowercase_ ( __snake_case : int ) -> bool:
    """Return ``True`` iff ``__snake_case`` is a prime number.

    Uses trial division by 6k +/- 1 candidates up to the square root.

    Args:
        __snake_case: the integer to test (negatives, 0 and 1 are not prime).

    Returns:
        ``True`` when the number is prime, ``False`` otherwise.
    """
    # Bug fix vs. the original: the body read an undefined name `number` instead of
    # the parameter.
    number = __snake_case
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def lowercase_ ( ) -> Dict:
    # NOTE(review): generator of primes (Project Euler 10 helper), mangled — the
    # counter is assigned to `snake_case__` but read/incremented as `num`, and the
    # primality test is called with the undefined `__snake_case`. Intent:
    # yield 2, 3, 5, 7, 11, ... indefinitely.
    '''simple docstring'''
    snake_case__ :List[str] = 2
    while True:
        if is_prime(__snake_case ):
            yield num
        num += 1
def lowercase_ ( __snake_case : int = 2_00_00_00 ) -> Any:
    # NOTE(review): mangled — the lambda's parameter is `__snake_case` but its body
    # reads `x` and the undefined `n`; intent is summing all primes below the limit
    # via `takewhile(lambda x: x < n, prime_generator())`. `prime_generator` itself
    # is renamed `lowercase_` in this chunk.
    '''simple docstring'''
    return sum(takewhile(lambda __snake_case : x < n , prime_generator() ) )
if __name__ == "__main__":
    print(F'''{solution() = }''')
| 720
|
from __future__ import annotations
def lowercase_ ( __snake_case : list ) -> float:
    """Return the arithmetic mean of a non-empty list of numbers.

    Args:
        __snake_case: list of numbers.

    Returns:
        ``sum / count`` as a float.

    Raises:
        ValueError: if the list is empty.
    """
    # Bug fix vs. the original: the emptiness guard read an undefined name `nums`
    # instead of the parameter.
    if not __snake_case:
        raise ValueError("List is empty" )
    return sum(__snake_case ) / len(__snake_case )
if __name__ == "__main__":
    # Run any doctests embedded in this module's docstrings when executed directly.
    import doctest
    doctest.testmod()
| 57
| 0
|
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
__UpperCAmelCase : str = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _snake_case ( lowercase__ ):
    # NOTE(review): a quantization-aware QA Trainer subclass (calibration, evaluate,
    # predict, ONNX export). Mangled throughout: several methods declare duplicate
    # `UpperCamelCase` parameters (a SyntaxError), bodies reference the undefined
    # `UpperCAmelCase__`, and locals/attributes assigned to `snake_case__` are read
    # back under their intended names (self.eval_examples, self.post_process_function,
    # self.quant_trainer_args, self.calib_num, metrics, output, ...). The comments
    # below document the intended contract of each method.
    def __init__( self ,*UpperCamelCase ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase=None ,**UpperCamelCase ) -> List[str]:
        # Stores eval examples, a post-processing hook and quant-trainer args on top
        # of the base Trainer; calib_num defaults to 128 calibration samples.
        super().__init__(*UpperCAmelCase__ ,**UpperCAmelCase__ )
        snake_case__ :Dict = eval_examples
        snake_case__ :Union[str, Any] = post_process_function
        snake_case__ :Optional[Any] = quant_trainer_args
        snake_case__ :Tuple = 128 # default number of calibration samples
    def lowerCAmelCase_ ( self ,UpperCamelCase=None ) -> List[str]:
        # Builds a (shuffled) DataLoader over the calibration dataset.
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires an calib_dataset." )
        snake_case__ :Dict = calib_dataset if calib_dataset is not None else self.calib_dataset
        snake_case__ :str = self._remove_unused_columns(UpperCAmelCase__ ,description="Calibration" )
        return DataLoader(
            UpperCAmelCase__ ,batch_size=self.args.eval_batch_size ,collate_fn=self.data_collator ,drop_last=self.args.dataloader_drop_last ,num_workers=self.args.dataloader_num_workers ,pin_memory=self.args.dataloader_pin_memory ,shuffle=UpperCAmelCase__ ,)
    def lowerCAmelCase_ ( self ,UpperCamelCase=None ) -> Union[str, Any]:
        # Runs quantization calibration: enable calibration, feed up to calib_num
        # samples through the model, then finalize the quantizer ranges.
        snake_case__ :int = self.train_dataset if calib_dataset is None else calib_dataset
        snake_case__ :Any = self.get_calib_dataloader(UpperCAmelCase__ )
        snake_case__ :Optional[int] = self.model
        quant_trainer.configure_model(UpperCAmelCase__ ,self.quant_trainer_args ,calib=UpperCAmelCase__ )
        model.eval()
        quant_trainer.enable_calibration(UpperCAmelCase__ )
        logger.info("***** Running calibration *****" )
        logger.info(f' Num examples = {self.calib_num}' )
        logger.info(f' Batch size = {calib_dataloader.batch_size}' )
        for step, inputs in enumerate(UpperCAmelCase__ ):
            # Prediction step
            snake_case__ :Any = self.prediction_step(UpperCAmelCase__ ,UpperCAmelCase__ ,prediction_loss_only=UpperCAmelCase__ )
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break
        quant_trainer.finish_calibration(UpperCAmelCase__ ,self.quant_trainer_args )
        snake_case__ :Union[str, Any] = model
    def lowerCAmelCase_ ( self ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase = "eval" ) -> str:
        # evaluate(): run the eval loop with metric computation deferred, then apply
        # the post-processing hook and compute/prefix/log the metrics.
        snake_case__ :str = self.eval_dataset if eval_dataset is None else eval_dataset
        snake_case__ :List[Any] = self.get_eval_dataloader(UpperCAmelCase__ )
        snake_case__ :Optional[int] = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        snake_case__ :Optional[int] = self.compute_metrics
        snake_case__ :Dict = None
        snake_case__ :int = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            snake_case__ :List[str] = eval_loop(
                UpperCAmelCase__ ,description="Evaluation" ,prediction_loss_only=True if compute_metrics is None else None ,ignore_keys=UpperCAmelCase__ ,)
        finally:
            snake_case__ :Tuple = compute_metrics
        if self.post_process_function is not None and self.compute_metrics is not None:
            snake_case__ :Tuple = self.post_process_function(UpperCAmelCase__ ,UpperCAmelCase__ ,output.predictions )
            snake_case__ :Optional[int] = self.compute_metrics(UpperCAmelCase__ )
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(f'{metric_key_prefix}_' ):
                    snake_case__ :Optional[int] = metrics.pop(UpperCAmelCase__ )
            self.log(UpperCAmelCase__ )
        else:
            snake_case__ :str = {}
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )
        snake_case__ :Any = self.callback_handler.on_evaluate(self.args ,self.state ,self.control ,UpperCAmelCase__ )
        return metrics
    def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=None ,UpperCamelCase = "test" ) -> Any:
        # predict(): same deferred-metrics pattern as evaluate(), returning a
        # PredictionOutput with prefixed metric keys.
        snake_case__ :Any = self.get_test_dataloader(UpperCAmelCase__ )
        # Temporarily disable metric computation, we will do it in the loop here.
        snake_case__ :List[str] = self.compute_metrics
        snake_case__ :List[str] = None
        snake_case__ :Union[str, Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            snake_case__ :List[Any] = eval_loop(
                UpperCAmelCase__ ,description="Prediction" ,prediction_loss_only=True if compute_metrics is None else None ,ignore_keys=UpperCAmelCase__ ,)
        finally:
            snake_case__ :Tuple = compute_metrics
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        snake_case__ :List[str] = self.post_process_function(UpperCAmelCase__ ,UpperCAmelCase__ ,output.predictions ,"predict" )
        snake_case__ :Any = self.compute_metrics(UpperCAmelCase__ )
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(f'{metric_key_prefix}_' ):
                snake_case__ :str = metrics.pop(UpperCAmelCase__ )
        return PredictionOutput(predictions=predictions.predictions ,label_ids=predictions.label_ids ,metrics=UpperCAmelCase__ )
    def lowerCAmelCase_ ( self ,UpperCamelCase="./" ) -> Optional[Any]:
        # save_onnx(): grab one eval batch, mark quantizers ONNX-exportable, and
        # export the (float) model with dynamic batch/sequence axes.
        snake_case__ :str = self.eval_dataset
        snake_case__ :List[str] = self.get_eval_dataloader(UpperCAmelCase__ )
        snake_case__ :Union[str, Any] = next(iter(UpperCAmelCase__ ) )
        # saving device - to make it consistent
        snake_case__ :Optional[Any] = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
        # convert to tuple
        snake_case__ :Any = tuple(v.to(UpperCAmelCase__ ) for k, v in batch.items() )
        logger.info("Converting model to be onnx compatible" )
        from pytorch_quantization.nn import TensorQuantizer
        snake_case__ :str = True
        snake_case__ :Optional[int] = self.model.to(UpperCAmelCase__ )
        model.eval()
        model.float()
        snake_case__ :Optional[int] = model.module if hasattr(UpperCAmelCase__ ,"module" ) else model
        quant_trainer.configure_model(UpperCAmelCase__ ,self.quant_trainer_args )
        snake_case__ :Dict = os.path.join(UpperCAmelCase__ ,"model.onnx" )
        logger.info(f'exporting model to {output_model_file}' )
        snake_case__ :Any = {0: '''batch_size''', 1: '''seq_len'''}
        torch.onnx.export(
            UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ ,export_params=UpperCAmelCase__ ,opset_version=13 ,do_constant_folding=UpperCAmelCase__ ,input_names=["input_ids", "attention_mask", "token_type_ids"] ,output_names=["output_start_logits", "output_end_logits"] ,dynamic_axes={
                "input_ids": axes,
                "attention_mask": axes,
                "token_type_ids": axes,
                "output_start_logits": axes,
                "output_end_logits": axes,
            } ,verbose=UpperCAmelCase__ ,)
        logger.info("onnx export finished" )
| 721
|
from __future__ import annotations
import math
def lowercase_ ( __snake_case : int , __snake_case : int , __snake_case : bool , __snake_case : list[int] , __snake_case : float ) -> int:
    # NOTE(review): minimax over a complete binary game tree, mangled — the five
    # parameters (depth, node_index, is_max, scores, height) share one name
    # (SyntaxError), and the body reads `depth`/`height`/`scores`/`is_max` plus the
    # recursive `minimax`, none of which exist under those names here.
    '''simple docstring'''
    if depth < 0:
        raise ValueError("Depth cannot be less than 0" )
    if len(__snake_case ) == 0:
        raise ValueError("Scores cannot be empty" )
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1 , node_index * 2 , __snake_case , __snake_case , __snake_case ) , minimax(depth + 1 , node_index * 2 + 1 , __snake_case , __snake_case , __snake_case ) , )
    return min(
        minimax(depth + 1 , node_index * 2 , __snake_case , __snake_case , __snake_case ) , minimax(depth + 1 , node_index * 2 + 1 , __snake_case , __snake_case , __snake_case ) , )
def lowercase_ ( ) -> None:
    # NOTE(review): demo driver — `__snake_case` in the minimax call is unbound; the
    # intent is minimax(0, 0, True, scores, height) with height = log2(len(scores)).
    '''simple docstring'''
    snake_case__ :List[Any] = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23]
    snake_case__ :int = math.log(len(__snake_case ) , 2 )
    print("Optimal value : " , end="" )
    print(minimax(0 , 0 , __snake_case , __snake_case , __snake_case ) )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
| 57
| 0
|
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)

# Maps each TrOCR submodule to the public names it exports; consumed by _LazyModule.
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

# The modeling file requires torch; register it only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 700
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
# PIL is only needed for the `PIL.Image.Image` types referenced below.
if is_vision_available():
    import PIL

# Module-level logger shared by this file.
__UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
def squared_euclidean_distance(a, b):
    """Return the matrix of squared Euclidean distances between rows of *a* and *b*.

    a: (n, d) array of points; b: (m, d) array of points.
    Result: (n, m) array with result[i, j] == ||a[i] - b[j]||^2, computed via
    the expansion ||x - y||^2 = ||x||^2 - 2 x.y + ||y||^2.
    """
    b = b.T
    # ||a_i||^2 per row of a, ||b_j||^2 per column of the transposed b.
    aa = np.sum(np.square(a), axis=1)
    bb = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = aa[:, None] - 2 * ab + bb[None, :]
    return d
def color_quantize(x, clusters):
    """Map each pixel of *x* (..., 3) to the index of its nearest cluster color.

    :param x: array of RGB pixels, any leading shape, last axis of size 3
    :param clusters: (n_clusters, 3) color palette
    :return: flat array of cluster indices, one per pixel
    """
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
class _snake_case ( BaseImageProcessor ):
    """Image processor for ImageGPT-style models.

    Optionally resizes images, normalizes pixel values to [-1, 1] and
    color-quantizes pixels against a palette of color ``clusters``, producing
    flat ``input_ids`` sequences.
    """

    _A = ['pixel_values']

    def __init__(
        self,
        clusters=None,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        do_normalize=True,
        do_color_quantize=True,
        **kwargs,
    ) -> None:
        """
        clusters: (n_clusters, 3) color palette used when color-quantizing.
        size: target {"height", "width"} dict for resizing; defaults to 256x256.
        """
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs) -> np.ndarray:
        """Resize *image* to ``size`` (a {"height", "width"} dict)."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'Size dictionary must contain both height and width keys. Got {size.keys()}')
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(self, image, data_format=None) -> np.ndarray:
        """Scale pixel values from [0, 255] to [-1, 1]."""
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_normalize=None,
        do_color_quantize=None,
        clusters=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Preprocess one image or a batch; every flag defaults to the instance setting.

        Returns a BatchFeature whose "input_ids" are either quantized flat pixel
        sequences (do_color_quantize) or channel-formatted arrays.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        # NOTE(review): kept as in the original — due to operator precedence this
        # also raises when do_resize is False but resample is None; confirm intent.
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_normalize:
            images = [self.normalize(image=image) for image in images]
        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])
            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)
            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 57
| 0
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging

# Module-level logger.
__UpperCAmelCase : str = logging.get_logger(__name__)
# Map of canonical checkpoint name -> hosted config URL.
# NOTE(review): this rebinds the same mangled name as the logger above and so
# overwrites it — an artifact of mechanical renaming; upstream these are
# `logger` and `WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP`. Confirm before relying on either.
__UpperCAmelCase : List[Any] = {
    'microsoft/wavlm-base': 'https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json',
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class _snake_case ( lowerCamelCase__ ):
    """Configuration holding every hyper-parameter of a WavLM model.

    NOTE(review): the base name `lowerCamelCase__` is not defined in this file;
    upstream this class derives from `PretrainedConfig` (imported above) — confirm.
    """

    # Model-type identifier used by the auto-config machinery.
    _A = 'wavlm'

    # NOTE(review): every parameter below shares the single name `UpperCamelCase`,
    # which is a SyntaxError (duplicate argument names) — an artifact of mechanical
    # renaming. The body reads names such as `hidden_size` that this signature
    # never binds. Upstream WavLMConfig declares them individually
    # (vocab_size=32, hidden_size=768, num_hidden_layers=12, ...). Confirm
    # against the upstream signature before fixing.
    def __init__( self ,UpperCamelCase=32 ,UpperCamelCase=768 ,UpperCamelCase=12 ,UpperCamelCase=12 ,UpperCamelCase=3_072 ,UpperCamelCase="gelu" ,UpperCamelCase=0.1 ,UpperCamelCase=0.1 ,UpperCamelCase=0.1 ,UpperCamelCase=0.0 ,UpperCamelCase=0.1 ,UpperCamelCase=0.1 ,UpperCamelCase=0.02 ,UpperCamelCase=1E-5 ,UpperCamelCase="group" ,UpperCamelCase="gelu" ,UpperCamelCase=(512, 512, 512, 512, 512, 512, 512) ,UpperCamelCase=(5, 2, 2, 2, 2, 2, 2) ,UpperCamelCase=(10, 3, 3, 3, 3, 2, 2) ,UpperCamelCase=False ,UpperCamelCase=128 ,UpperCamelCase=16 ,UpperCamelCase=320 ,UpperCamelCase=800 ,UpperCamelCase=False ,UpperCamelCase=True ,UpperCamelCase=0.05 ,UpperCamelCase=10 ,UpperCamelCase=2 ,UpperCamelCase=0.0 ,UpperCamelCase=10 ,UpperCamelCase=320 ,UpperCamelCase=2 ,UpperCamelCase=0.1 ,UpperCamelCase=100 ,UpperCamelCase=256 ,UpperCamelCase=256 ,UpperCamelCase=0.1 ,UpperCamelCase="mean" ,UpperCamelCase=False ,UpperCamelCase=False ,UpperCamelCase=256 ,UpperCamelCase=(512, 512, 512, 512, 1_500) ,UpperCamelCase=(5, 3, 3, 1, 1) ,UpperCamelCase=(1, 2, 3, 1, 1) ,UpperCamelCase=512 ,UpperCamelCase=80 ,UpperCamelCase=0 ,UpperCamelCase=1 ,UpperCamelCase=2 ,UpperCamelCase=False ,UpperCamelCase=3 ,UpperCamelCase=2 ,UpperCamelCase=3 ,UpperCamelCase=None ,**UpperCamelCase ,) -> Tuple:
        # NOTE(review): `__lowerCamelCase` is never bound here either — same
        # renaming artifact (upstream forwards kwargs plus the special token ids).
        super().__init__(**__lowerCamelCase ,pad_token_id=__lowerCamelCase ,bos_token_id=__lowerCamelCase ,eos_token_id=__lowerCamelCase )
        # Transformer encoder dimensions.
        snake_case__ :Tuple = hidden_size
        # Convolutional feature-encoder settings.
        snake_case__ :Dict = feat_extract_norm
        snake_case__ :List[Any] = feat_extract_activation
        snake_case__ :Union[str, Any] = list(__lowerCamelCase )
        snake_case__ :Union[str, Any] = list(__lowerCamelCase )
        snake_case__ :Tuple = list(__lowerCamelCase )
        snake_case__ :int = conv_bias
        # Relative-position bucket settings (WavLM-specific attention bias).
        snake_case__ :Optional[int] = num_buckets
        snake_case__ :Dict = max_bucket_distance
        snake_case__ :Optional[Any] = num_conv_pos_embeddings
        snake_case__ :Dict = num_conv_pos_embedding_groups
        snake_case__ :Union[str, Any] = len(self.conv_dim )
        snake_case__ :List[str] = num_hidden_layers
        snake_case__ :Union[str, Any] = intermediate_size
        snake_case__ :Optional[Any] = hidden_act
        snake_case__ :List[Any] = num_attention_heads
        # Dropout probabilities for the various sub-layers.
        snake_case__ :Tuple = hidden_dropout
        snake_case__ :List[Any] = attention_dropout
        snake_case__ :List[str] = activation_dropout
        snake_case__ :Any = feat_proj_dropout
        snake_case__ :int = final_dropout
        snake_case__ :Optional[Any] = layerdrop
        snake_case__ :Dict = layer_norm_eps
        snake_case__ :Optional[int] = initializer_range
        snake_case__ :int = num_ctc_classes
        snake_case__ :Tuple = vocab_size
        snake_case__ :Any = do_stable_layer_norm
        snake_case__ :List[str] = use_weighted_layer_sum
        snake_case__ :Any = classifier_proj_size
        # The three conv tuples must describe the same number of layers.
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
                f' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        snake_case__ :Union[str, Any] = apply_spec_augment
        snake_case__ :List[Any] = mask_time_prob
        snake_case__ :int = mask_time_length
        snake_case__ :Union[str, Any] = mask_time_min_masks
        snake_case__ :List[Any] = mask_feature_prob
        snake_case__ :List[Any] = mask_feature_length
        # parameters for pretraining with codevector quantized representations
        snake_case__ :List[str] = num_codevectors_per_group
        snake_case__ :str = num_codevector_groups
        snake_case__ :Dict = contrastive_logits_temperature
        snake_case__ :Any = num_negatives
        snake_case__ :List[str] = codevector_dim
        snake_case__ :str = proj_codevector_dim
        snake_case__ :Union[str, Any] = diversity_loss_weight
        # ctc loss
        snake_case__ :Any = ctc_loss_reduction
        snake_case__ :Dict = ctc_zero_infinity
        # adapter
        snake_case__ :int = add_adapter
        snake_case__ :Dict = adapter_kernel_size
        snake_case__ :Dict = adapter_stride
        snake_case__ :Dict = num_adapter_layers
        snake_case__ :str = output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        snake_case__ :Tuple = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        snake_case__ :Union[str, Any] = list(__lowerCamelCase )
        snake_case__ :int = list(__lowerCamelCase )
        snake_case__ :List[Any] = list(__lowerCamelCase )
        snake_case__ :int = xvector_output_dim

    @property
    def lowerCAmelCase_ ( self ) -> List[str]:
        # Product of all conv strides == total downsampling factor of the feature encoder.
        return functools.reduce(operator.mul ,self.conv_stride ,1 )
| 701
|
import pytest


# Name of the dummy `datasets` loading-script module used in tests.
DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"

# Source of a minimal `datasets` loading script; written to disk by the
# `dataset_loading_script_dir` fixture below.
DATASET_LOADING_SCRIPT_CODE = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n    def _info(self):\n        features = datasets.Features(\n            {\n                \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n                \"ner_tags\": datasets.Sequence(\n                    datasets.features.ClassLabel(\n                        names=[\n                            \"O\",\n                            \"B-PER\",\n                            \"I-PER\",\n                            \"B-ORG\",\n                            \"I-ORG\",\n                            \"B-LOC\",\n                            \"I-LOC\",\n                        ]\n                    )\n                ),\n                \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n                \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n            }\n        )\n        return datasets.DatasetInfo(features=features)\n\n    def _split_generators(self, dl_manager):\n        dl_path = dl_manager.download(URLS)\n        return [\n            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n        ]\n\n    def _generate_examples(self, filepath):\n        with open(filepath, \"r\", encoding=\"utf-8\") as f:\n            for i, line in enumerate(f):\n                yield i, json.loads(line)\n"


@pytest.fixture
def dataset_loading_script_name() -> str:
    """Return the name of the dummy dataset loading script."""
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code() -> str:
    """Return the Python source of the dummy dataset loading script."""
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path) -> str:
    """Write the dummy loading script under ``tmp_path`` and return its directory."""
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f'{script_name}.py'
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
| 57
| 0
|
import baseaa
def lowercase_ ( __snake_case : Union[str, Any] ) -> bytes:
'''simple docstring'''
return baseaa.baaencode(string.encode("utf-8" ) )
def lowercase_ ( __snake_case : Optional[int] ) -> str:
'''simple docstring'''
return baseaa.baadecode(UpperCamelCase__ ).decode("utf-8" )
if __name__ == "__main__":
__UpperCAmelCase : Tuple = "Hello World!"
__UpperCAmelCase : List[str] = baseaa_encode(test)
print(encoded)
__UpperCAmelCase : Any = baseaa_decode(encoded)
print(decoded)
| 702
|
from ...utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_torch_available,
    is_transformers_available,
)


# PyTorch ControlNet pipelines: fall back to dummy placeholder objects when
# torch/transformers are missing so importing this package never hard-fails.
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .multicontrolnet import MultiControlNetModel
    from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
    from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline


# The Flax pipeline is only exposed when both transformers and flax are installed.
if is_transformers_available() and is_flax_available():
    from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 57
| 0
|
'''simple docstring'''
def print_pascal_triangle(num_rows: int) -> None:
    """Pretty-print Pascal's triangle with ``num_rows`` rows, centered with spaces."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Left padding so each row is centered under the apex.
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Row values, space-separated except after the last element.
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()
def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Build Pascal's triangle row by row via ``populate_current_row``.

    :param num_rows: number of rows to generate (must be a non-negative int)
    :raises TypeError: if ``num_rows`` is not an int
    :raises ValueError: if ``num_rows`` is negative
    """
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle
def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    """Compute row ``current_row_idx`` of Pascal's triangle from the rows built so far."""
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    # Interior elements are the sum of the two elements above them.
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx)
    return current_row
def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    """Set ``current_row[current_col_idx]`` to the sum of the two elements above it (mutates in place)."""
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Build Pascal's triangle computing only each row's first half, then mirroring it.

    :param num_rows: number of rows to generate (must be a non-negative int)
    :raises TypeError: if ``num_rows`` is not an int
    :raises ValueError: if ``num_rows`` is negative
    """
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        # Pad the previous row with zeros so adjacent sums give the new row.
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        # Mirror the first half (dropping the middle element for odd-length rows).
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result
def benchmark() -> None:
    """Time both triangle generators for row counts 0..14 and print the results."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        # Time `func(value)` by re-importing this module in the timeit setup.
        call = f'{func.__name__}({value})'
        timing = timeit(f'__main__.{call}', setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f'{call:38} -- {timing:.4f} seconds')

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
| 703
|
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List

from ..utils import logging
from . import BaseTransformersCLICommand


# cookiecutter is an optional dependency; `run()` checks this flag and raises a
# helpful ImportError message when the template engine is unavailable.
try:
    from cookiecutter.main import cookiecutter

    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def lowercase_(args: Namespace):
    """Factory for the `add-new-model` CLI sub-command.

    Builds the command object (the `_snake_case` class below) from parsed
    argparse arguments; registered via `parser.set_defaults(func=...)`.
    """
    return _snake_case(args.testing, args.testing_file, path=args.path)
class _snake_case ( _A ):
    """CLI command `transformers-cli add-new-model` (deprecated): runs the
    `adding_a_new_model` cookiecutter template and moves the generated model,
    test, doc and tokenizer files into the transformers source tree.

    NOTE(review): the base class `_A` is not defined at module level here;
    upstream this derives from `BaseTransformersCLICommand` (imported above) —
    confirm. Several identifiers inside the methods (`parser`, `UpperCamelCase`,
    `model_dir`, `output_pytorch`, ...) were mangled by mechanical renaming:
    values are assigned to the throwaway name `snake_case__ :X` and later read
    under their upstream names, so the code as written raises NameError at
    runtime. Comments below describe the upstream intent.
    """

    @staticmethod
    def lowerCAmelCase_ ( UpperCamelCase ) -> Any:
        # Register the `add-new-model` sub-parser and its options on the given parser.
        snake_case__ :Dict = parser.add_parser("add-new-model" )
        add_new_model_parser.add_argument("--testing" ,action="store_true" ,help="If in testing mode." )
        add_new_model_parser.add_argument("--testing_file" ,type=UpperCamelCase ,help="Configuration file on which to run." )
        add_new_model_parser.add_argument(
            "--path" ,type=UpperCamelCase ,help="Path to cookiecutter. Should only be used for testing purposes." )
        add_new_model_parser.set_defaults(func=UpperCamelCase )

    # NOTE(review): parameters are duplicated (`UpperCamelCase` twice) — a
    # SyntaxError; upstream signature is (self, testing, testing_file, path=None, *args).
    def __init__( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=None ,*UpperCamelCase ) -> Any:
        snake_case__ :Union[str, Any] = testing
        snake_case__ :Union[str, Any] = testing_file
        snake_case__ :List[str] = path

    def lowerCAmelCase_ ( self ) -> List[Any]:
        # Main entry point: generate the template, then distribute its output files.
        warnings.warn(
            "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
            "It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
            "checks, you should use `transformers-cli add-new-model-like` instead." )
        # Refuse to run when the optional cookiecutter dependency is missing.
        if not _has_cookiecutter:
            raise ImportError(
                "Model creation dependencies are required to use the `add_new_model` command. Install them by running "
                "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n" )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        snake_case__ :Tuple = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(UpperCamelCase ) > 0:
            raise ValueError(
                "Several directories starting with `cookiecutter-template-` in current working directory. "
                "Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
                "change your working directory." )
        # Locate the repo root and the bundled cookiecutter template directory.
        snake_case__ :str = (
            Path(UpperCamelCase ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
        )
        snake_case__ :Tuple = path_to_transformer_root / "templates" / "adding_a_new_model"
        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(UpperCamelCase ) )
        else:
            # Testing mode: feed the saved configuration instead of prompting.
            with open(self._testing_file ,"r" ) as configuration_file:
                snake_case__ :str = json.load(UpperCamelCase )
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path ) ,no_input=UpperCamelCase ,extra_context=UpperCamelCase ,)
        snake_case__ :List[Any] = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]
        # Retrieve configuration
        with open(directory + "/configuration.json" ,"r" ) as configuration_file:
            snake_case__ :Dict = json.load(UpperCamelCase )
        snake_case__ :Optional[Any] = configuration["lowercase_modelname"]
        snake_case__ :List[Any] = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f'{directory}/configuration.json' )
        # Which framework variants were requested in the template configuration.
        snake_case__ :Any = "PyTorch" in generate_tensorflow_pytorch_and_flax
        snake_case__ :Any = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        snake_case__ :Any = "Flax" in generate_tensorflow_pytorch_and_flax
        snake_case__ :Dict = f'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'
        os.makedirs(UpperCamelCase ,exist_ok=UpperCamelCase )
        os.makedirs(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}' ,exist_ok=UpperCamelCase )
        # Tests require submodules as they have parent imports
        with open(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' ,"w" ):
            pass
        # Always-installed files: package init and configuration module.
        shutil.move(
            f'{directory}/__init__.py' ,f'{model_dir}/__init__.py' ,)
        shutil.move(
            f'{directory}/configuration_{lowercase_model_name}.py' ,f'{model_dir}/configuration_{lowercase_model_name}.py' ,)

        def remove_copy_lines(UpperCamelCase ):
            # Strip `# Copied from transformers.` markers from a generated file in place.
            with open(UpperCamelCase ,"r" ) as f:
                snake_case__ :List[str] = f.readlines()
            with open(UpperCamelCase ,"w" ) as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(UpperCamelCase )

        # Install or discard the PyTorch modeling/test files depending on the config.
        if output_pytorch:
            if not self._testing:
                remove_copy_lines(f'{directory}/modeling_{lowercase_model_name}.py' )
            shutil.move(
                f'{directory}/modeling_{lowercase_model_name}.py' ,f'{model_dir}/modeling_{lowercase_model_name}.py' ,)
            shutil.move(
                f'{directory}/test_modeling_{lowercase_model_name}.py' ,f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' ,)
        else:
            os.remove(f'{directory}/modeling_{lowercase_model_name}.py' )
            os.remove(f'{directory}/test_modeling_{lowercase_model_name}.py' )
        # Same for the TensorFlow variant.
        if output_tensorflow:
            if not self._testing:
                remove_copy_lines(f'{directory}/modeling_tf_{lowercase_model_name}.py' )
            shutil.move(
                f'{directory}/modeling_tf_{lowercase_model_name}.py' ,f'{model_dir}/modeling_tf_{lowercase_model_name}.py' ,)
            shutil.move(
                f'{directory}/test_modeling_tf_{lowercase_model_name}.py' ,f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' ,)
        else:
            os.remove(f'{directory}/modeling_tf_{lowercase_model_name}.py' )
            os.remove(f'{directory}/test_modeling_tf_{lowercase_model_name}.py' )
        # Same for the Flax variant.
        if output_flax:
            if not self._testing:
                remove_copy_lines(f'{directory}/modeling_flax_{lowercase_model_name}.py' )
            shutil.move(
                f'{directory}/modeling_flax_{lowercase_model_name}.py' ,f'{model_dir}/modeling_flax_{lowercase_model_name}.py' ,)
            shutil.move(
                f'{directory}/test_modeling_flax_{lowercase_model_name}.py' ,f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' ,)
        else:
            os.remove(f'{directory}/modeling_flax_{lowercase_model_name}.py' )
            os.remove(f'{directory}/test_modeling_flax_{lowercase_model_name}.py' )
        # Documentation page and tokenizer modules.
        shutil.move(
            f'{directory}/{lowercase_model_name}.md' ,f'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' ,)
        shutil.move(
            f'{directory}/tokenization_{lowercase_model_name}.py' ,f'{model_dir}/tokenization_{lowercase_model_name}.py' ,)
        shutil.move(
            f'{directory}/tokenization_fast_{lowercase_model_name}.py' ,f'{model_dir}/tokenization_{lowercase_model_name}_fast.py' ,)
        from os import fdopen, remove
        from shutil import copymode, move
        from tempfile import mkstemp

        def replace(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ):
            # Insert `lines_to_copy` into `original_file` below the anchor line,
            # writing through a temp file to preserve permissions.
            # Create temp file
            snake_case__ , snake_case__ :Optional[Any] = mkstemp()
            snake_case__ :Optional[Any] = False
            with fdopen(UpperCamelCase ,"w" ) as new_file:
                with open(UpperCamelCase ) as old_file:
                    for line in old_file:
                        new_file.write(UpperCamelCase )
                        if line_to_copy_below in line:
                            snake_case__ :Optional[Any] = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(UpperCamelCase )
            if not line_found:
                raise ValueError(f'Line {line_to_copy_below} was not found in file.' )
            # Copy the file permissions from the old file to the new file
            copymode(UpperCamelCase ,UpperCamelCase )
            # Remove original file
            remove(UpperCamelCase )
            # Move new file
            move(UpperCamelCase ,UpperCamelCase )

        def skip_units(UpperCamelCase ):
            # True when the snippet targets a framework the user did not request.
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )

        def replace_in_files(UpperCamelCase ):
            # Parse a `to_replace_*.py` directive file and apply each snippet
            # to its target source file via `replace` above.
            with open(UpperCamelCase ) as datafile:
                snake_case__ :int = []
                snake_case__ :Optional[int] = False
                snake_case__ :List[str] = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        snake_case__ :Optional[Any] = line.split("\"" )[1]
                        snake_case__ :Tuple = skip_units(UpperCamelCase )
                    elif "# Below: " in line and "##" not in line:
                        snake_case__ :Optional[Any] = line.split("\"" )[1]
                        snake_case__ :List[str] = skip_units(UpperCamelCase )
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
                        snake_case__ :Tuple = []
                    elif "# Replace with" in line and "##" not in line:
                        snake_case__ :Optional[Any] = []
                    elif "##" not in line:
                        lines_to_copy.append(UpperCamelCase )
            remove(UpperCamelCase )

        replace_in_files(f'{directory}/to_replace_{lowercase_model_name}.py' )
        os.rmdir(UpperCamelCase )
| 57
| 0
|
import os
import re
import shutil
import sys
import tempfile
import unittest

import black


# Root of the transformers checkout: three levels above this test file.
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
# `check_copies` lives in the repo's utils/ directory, which is not a package.
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_copies  # noqa: E402


# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = "    def __init__(self, config):\n        super().__init__()\n        self.transform = BertPredictionHeadTransform(config)\n\n        # The output weights are the same as the input embeddings, but there is\n        # an output-only bias for each token.\n        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n        self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n        self.decoder.bias = self.bias\n\n    def forward(self, hidden_states):\n        hidden_states = self.transform(hidden_states)\n        hidden_states = self.decoder(hidden_states)\n        return hidden_states\n"
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase_ ( self ) -> List[str]:
snake_case__ :str = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir ,"models/bert/" ) )
snake_case__ :Optional[int] = self.transformer_dir
shutil.copy(
os.path.join(A_ ,"src/transformers/models/bert/modeling_bert.py" ) ,os.path.join(self.transformer_dir ,"models/bert/modeling_bert.py" ) ,)
def lowerCAmelCase_ ( self ) -> int:
snake_case__ :List[str] = "src/transformers"
shutil.rmtree(self.transformer_dir )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=None ) -> int:
snake_case__ :Union[str, Any] = comment + f'\nclass {class_name}(nn.Module):\n' + class_code
if overwrite_result is not None:
snake_case__ :Tuple = comment + f'\nclass {class_name}(nn.Module):\n' + overwrite_result
snake_case__ :Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} ,line_length=119 )
snake_case__ :List[str] = black.format_str(A_ ,mode=A_ )
snake_case__ :List[str] = os.path.join(self.transformer_dir ,"new_code.py" )
with open(A_ ,"w" ,newline="\n" ) as f:
f.write(A_ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(A_ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name ,overwrite=A_ )
with open(A_ ,"r" ) as f:
self.assertTrue(f.read() ,A_ )
def lowerCAmelCase_ ( self ) -> Optional[int]:
snake_case__ :int = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead" )
self.assertEqual(A_ ,A_ )
def lowerCAmelCase_ ( self ) -> str:
# Base copy consistency
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" ,"BertLMPredictionHead" ,REFERENCE_CODE + "\n" ,)
# With no empty line at the end
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" ,"BertLMPredictionHead" ,A_ ,)
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" ,"TestModelLMPredictionHead" ,re.sub("Bert" ,"TestModel" ,A_ ) ,)
# Copy consistency with a really long name
snake_case__ :Tuple = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
f'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}' ,f'{long_class_name}LMPredictionHead' ,re.sub("Bert" ,A_ ,A_ ) ,)
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" ,"TestModelLMPredictionHead" ,A_ ,overwrite_result=re.sub("Bert" ,"TestModel" ,A_ ) ,)
    def lowerCAmelCase_ ( self ) -> str:
        """Test `check_copies.convert_to_localized_md` against the zh-hans README.

        Three scenarios: (1) a localized list missing two models must be flagged and
        completed; (2) a matching list must pass; (3) a stale model link in the
        localized README must be synchronized with the English one.

        NOTE(review): `A_` is an undefined mangled name — the original passed the
        string constants assigned above each call; confirm against upstream.
        """
        snake_case__ :Optional[int] = check_copies.LOCALIZED_READMES["README_zh-hans.md"]
        # English model list containing three entries (ALBERT, DistilBERT, ELECTRA).
        snake_case__ :Any = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
            " Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
            " Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
            " Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."
            " **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"
            " released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
            " lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"
            " method has been applied to compress GPT2 into"
            " [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
            " [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
            " Multilingual BERT into"
            " [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
            " version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"
            " (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"
            " as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"
            " Luong, Quoc V. Le, Christopher D. Manning."
        )
        # Localized list with only the first entry — conversion must add the rest.
        snake_case__ :Union[str, Any] = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
            " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
        )
        # Expected fully-converted localized list.
        snake_case__ :Optional[Any] = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
            " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."
            " **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"
            " [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
            " lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"
            " method has been applied to compress GPT2 into"
            " [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
            " [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
            " Multilingual BERT into"
            " [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
            " version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"
            " Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"
            " than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"
            " Christopher D. Manning 发布。\n"
        )
        snake_case__ , snake_case__ :Optional[Any] = check_copies.convert_to_localized_md(
            A_ ,A_ ,localized_readme["format_model_list"] )
        self.assertFalse(A_ )
        self.assertEqual(A_ ,A_ )
        snake_case__ , snake_case__ :Tuple = check_copies.convert_to_localized_md(
            A_ ,A_ ,localized_readme["format_model_list"] )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(A_ )
        snake_case__ :int = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
            " Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
            " Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
            " Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."
        )
        # Localized entry with a stale '/main/' model link that must be rewritten.
        snake_case__ :Union[str, Any] = (
            "1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"
            " the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
        )
        # Expected entry with the link synchronized to the English README.
        snake_case__ :Any = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
            " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
        )
        snake_case__ , snake_case__ :Dict = check_copies.convert_to_localized_md(
            A_ ,A_ ,localized_readme["format_model_list"] )
        # Check if the model link is synchronized.
        self.assertEqual(A_ ,A_ )
| 704
|
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
__UpperCAmelCase : str = logging.get_logger(__name__)
# Module constants for the HerBERT fast tokenizer.
# BUG FIX: all four constants were assigned to the same mangled name
# `__UpperCAmelCase`, shadowing one another, while the tokenizer class below
# references VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_INIT_CONFIGURATION / PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES.

# File names expected inside a pretrained checkpoint directory.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# Hosted locations of the vocabulary/merges files for the published checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

# Maximum input length supported by the published checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}

# No extra init kwargs are needed for the pretrained checkpoint.
PRETRAINED_INIT_CONFIGURATION = {}
class _snake_case(PreTrainedTokenizerFast):
    """Fast (Rust-backed) tokenizer for HerBERT.

    BUG FIXES vs. the mangled original: the base class `_A` was undefined (the
    import at the top of this file provides ``PreTrainedTokenizerFast``); every
    ``__init__`` parameter shared the duplicate name ``UpperCamelCase`` (a
    SyntaxError); all four methods were named ``lowerCAmelCase_`` and shadowed
    each other, so the base class could never dispatch to them — the canonical
    ``PreTrainedTokenizerFast`` override names are restored.

    HerBERT peculiarity: ``cls`` is "<s>" and ``sep`` is "</s>".
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ) -> None:
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_a: List[int], token_ids_a_pair: Optional[List[int]] = None
    ) -> List[int]:
        """Add special tokens: ``<s> A </s>`` or ``<s> A </s> B </s>``."""
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_a_pair is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a_pair + sep

    def get_special_tokens_mask(
        self,
        token_ids_a: List[int],
        token_ids_a_pair: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        """Return a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_a_pair, already_has_special_tokens=True
            )
        if token_ids_a_pair is None:
            return [1] + ([0] * len(token_ids_a)) + [1]
        return [1] + ([0] * len(token_ids_a)) + [1] + ([0] * len(token_ids_a_pair)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_a_pair: Optional[List[int]] = None
    ) -> List[int]:
        """Segment ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_pair is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a_pair + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backing tokenizer model's files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 57
| 0
|
def lowercase_(__snake_case: int = 60_08_51_47_51_43) -> int:
    """Return the largest prime factor of ``__snake_case`` (Project Euler #3).

    BUG FIXES: the body referenced the undefined name ``_lowerCamelCase``
    instead of the parameter, and the running answer had been collapsed into
    the same mangled variable as the shrinking ``n`` so the result was lost.

    :raises TypeError: if the argument cannot be cast to ``int``.
    :raises ValueError: if the argument is not >= 1.
    """
    try:
        n = int(__snake_case)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2  # trial divisor
    ans = 0  # largest prime factor found so far
    if n == 2:
        return 2
    while n > 2:
        # Advance to the next divisor of n (necessarily prime).
        while n % i != 0:
            i += 1
        ans = i
        # Strip this prime factor completely before moving on.
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)


if __name__ == "__main__":
    # BUG FIX: the guard called the undefined name `solution`.
    print(f"{lowercase_() = }")
| 705
|
def lowercase_(__snake_case: int) -> bool:
    """Lucas-Lehmer primality test: is the Mersenne number 2**p - 1 prime?

    BUG FIX: the body referenced the undefined names ``p``, ``s`` and ``m`` —
    the mangled locals had all been collapsed into one variable.

    :param __snake_case: the exponent p (must be >= 2).
    :raises ValueError: if p < 2.
    """
    p = __snake_case
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True  # 2**2 - 1 = 3 is prime; the recurrence needs p > 2.
    s = 4  # Lucas-Lehmer seed
    m = (1 << p) - 1  # the Mersenne number under test
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    # BUG FIX: the guard called the undefined name `lucas_lehmer_test`.
    print(lowercase_(7))
    print(lowercase_(1_1))
| 57
| 0
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
# Configure transformers' logging for this training script: INFO verbosity,
# the library's default stream handler, and the explicit timestamped format.
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    """Load CSV splits and tokenize them into ``tf.data`` datasets.

    BUG FIXES: every parameter carried the duplicate mangled name
    ``__snake_case`` (a SyntaxError) and the body referenced the undefined name
    ``_SCREAMING_SNAKE_CASE``; the call site below
    (``get_tfds(train_file=..., eval_file=..., ...)``) grounds the restored
    function and parameter names. ``tf.intaa`` is restored to ``tf.int32`` /
    ``tf.int64``.

    :returns: ``(train_ds, val_ds, test_ds, label2id)`` where each dataset may
        be ``None`` if the corresponding file was not given.
    """
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]
    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}
    if len(features_name) == 1:
        # Single text column: encode it directly.
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        # Sentence-pair task: encode the two columns together.
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))
    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))
    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))
    return train_ds, val_ds, test_ds, label2id
# BUG FIX: `main()` below logs through `logger`, which was undefined — the
# module logger had been assigned to the mangled name `__UpperCAmelCase`.
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """Arguments describing the data used for training and evaluation.

    BUG FIXES: the class was named ``_snake_case`` while ``main`` constructs
    ``HfArgumentParser((ModelArguments, DataTrainingArguments, ...))``; every
    field was named ``_A`` (shadowing) and ``default=_A`` referenced an
    undefined name. Field names are grounded by ``main``'s uses of
    ``data_args.train_file`` / ``dev_file`` / ``test_file`` /
    ``label_column_id`` / ``max_seq_length``.
    """

    # Index of the CSV column that contains the label.
    label_column_id: int = field(metadata={'help': 'Which column contains the label'})
    train_file: Optional[str] = field(default=None, metadata={'help': 'The path of the training file'})
    dev_file: Optional[str] = field(default=None, metadata={'help': 'The path of the development file'})
    test_file: Optional[str] = field(default=None, metadata={'help': 'The path of the test file'})
    max_seq_length: int = field(
        default=128,
        metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'}
    )
@dataclass
class ModelArguments:
    """Arguments describing which pretrained model/config/tokenizer to use.

    BUG FIXES: class name restored (``main`` references ``ModelArguments``),
    ``_A`` field names restored, and ``default=_A`` replaced by real defaults.
    Field names are grounded by ``main``'s uses of
    ``model_args.model_name_or_path`` / ``config_name`` / ``tokenizer_name`` /
    ``cache_dir``.
    """

    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'}
    )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'}
    )
    use_fast: bool = field(default=False, metadata={'help': 'Set this flag to use fast tokenization.'})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'},
    )
def main() -> int:
    """Entry point: fine-tune a TF sequence-classification model on CSV data.

    BUG FIXES: the ``if __name__`` guard below calls ``main()``, which was
    undefined (the function was named ``lowercase_``); the body referenced the
    undefined ``_SCREAMING_SNAKE_CASE`` and the three parsed argument objects
    had been collapsed into one mangled name. ``training_args.fpaa`` restored
    to ``fp16``.
    """
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
            " --overwrite_output_dir to overcome."
        )
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO,
    )
    logger.info(
        f'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, '
        f'16-bits training: {training_args.fp16}'
    )
    logger.info(f'Training/evaluation parameters {training_args}')
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )
    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        # Accuracy over argmax predictions.
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )
    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f' {key} = {value}')
                writer.write(f'{key} = {value}\n')
            results.update(result)
    return results
if __name__ == "__main__":
main()
| 706
|
from typing import Any


def lowercase_(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """Viterbi algorithm: most likely sequence of hidden states.

    BUG FIXES: in the mangled original, all eight functions in this module were
    named ``lowercase_`` (each shadowing the previous one) and every function's
    parameters shared one duplicate name (a SyntaxError), while the bodies
    referenced the real names. The public entry point keeps the module's
    original public name; the validators are restored as private helpers.

    :param observations_space: ordered list of observation labels.
    :param states_space: list of hidden-state labels.
    :param initial_probabilities: state -> P(state at t=0).
    :param transition_probabilities: state -> {state -> P(transition)}.
    :param emission_probabilities: state -> {observation -> P(emission)}.
    :returns: the most probable state for each observation, in order.
    :raises ValueError: on empty or mis-typed arguments.
    """
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # probabilities[(state, observation)] = best path probability ending in `state`
    # pointers[(state, observation)]     = predecessor state on that best path
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
    # Fill the tables with the probabilities of different transitions
    # and pointers to previous states.
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Argmax over predecessor states.
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probability and pointer tables.
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
    # Argmax for the final observation.
    final_observation = observations_space[len(observations_space) - 1]
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards to recover the path.
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Validate every argument of the Viterbi entry point."""
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Raise ValueError if any argument is empty/falsy."""
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    """Validate the two list-of-strings arguments."""
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    """Ensure `_object` is a list of strings."""
    if not isinstance(_object, list):
        raise ValueError(f'{var_name} must be a list')
    else:
        for x in _object:
            if not isinstance(x, str):
                raise ValueError(f'{var_name} must be a list of strings')


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Validate the three probability-table arguments."""
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    """Ensure `_object` is a dict of dicts of floats."""
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    """Ensure `_object` is a dict with string keys and `value_type` values."""
    if not isinstance(_object, dict):
        raise ValueError(f'{var_name} must be a dict')
    if not all(isinstance(x, str) for x in _object):
        raise ValueError(f'{var_name} all keys must be strings')
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        raise ValueError(f'{var_name} {nested_text}all values must be {value_type.__name__}')


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 57
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# BUG FIX: the module logger and the archive map were both assigned to the
# mangled name `__UpperCAmelCase`, shadowing each other.
logger = logging.get_logger(__name__)

# Map from checkpoint name to its hosted config file.
# NOTE(review): constant name follows the transformers convention
# (<MODEL>_PRETRAINED_CONFIG_ARCHIVE_MAP) — confirm against upstream.
VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class _snake_case(PretrainedConfig):
    """Configuration class for a ViT-MSN model.

    BUG FIXES: the base class `A_` was undefined (the import at the top of this
    file provides ``PretrainedConfig``); all ``__init__`` parameters shared the
    duplicate name ``UpperCamelCase`` (a SyntaxError) while the body assigned
    the real attribute names, which are restored here; the class attribute
    ``_A`` is restored to ``model_type`` per the transformers convention.
    """

    model_type = 'vit_msn'

    def __init__(
        self,
        hidden_size=768,  # dimensionality of encoder layers
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,  # feed-forward hidden size
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1E-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 707
|
def lowercase_(__snake_case: str) -> list:
    """Return every variant of the string with exactly one letter upper-cased.

    Non-alphabetic positions are skipped, so an input with no letters yields
    an empty list.

    BUG FIX: the comprehension referenced the undefined name ``txt`` instead
    of the parameter.
    """
    txt = __snake_case
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]


if __name__ == "__main__":
    __import__("doctest").testmod()
| 57
| 0
|
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
# BUG FIX: the module logger and the usage-example string were both assigned to
# the mangled name `__UpperCAmelCase`; `_encode_prompt` below logs via `logger`.
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

# Usage example — presumably injected into `__call__`'s docstring via
# @replace_example_docstring (NOTE(review): confirm, the decorator argument
# was mangled).
EXAMPLE_DOC_STRING = "\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/Kandinsky-2-1-prior\")\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"red cat, 4k photo\"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\")\n >>> pipe.to(\"cuda\")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save(\"cat.png\")\n ```\n"
def get_new_h_w(h: int, w: int, scale_factor: int = 8) -> tuple:
    """Round (h, w) up to the nearest multiple of ``scale_factor**2``, then
    divide by ``scale_factor`` — the latent height/width for the movq decoder.

    BUG FIXES: all three parameters shared the duplicate mangled name
    ``__snake_case`` (a SyntaxError) while the body used ``h``/``w``/
    ``scale_factor``; the function name is restored to ``get_new_h_w`` as
    called by the pipeline's ``__call__`` below.
    """
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
class _snake_case(DiffusionPipeline):
    """Kandinsky text-to-image pipeline (text encoder + tokenizer + UNet +
    scheduler + movq image decoder).

    BUG FIXES: the base class `__A` was undefined (the import at the top of
    this file provides ``DiffusionPipeline``); the five ``__init__``
    parameters shared the duplicate name ``UpperCamelCase`` (a SyntaxError) —
    the real names are grounded by the ``register_modules`` keyword arguments.
    """

    def __init__(self, text_encoder, tokenizer, unet, scheduler, movq) -> None:
        super().__init__()
        self.register_modules(
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        # Downscale factor of the movq decoder, used by get_new_h_w in __call__.
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Any:
if latents is None:
snake_case__ :Optional[int] = randn_tensor(UpperCamelCase__ ,generator=UpperCamelCase__ ,device=UpperCamelCase__ ,dtype=UpperCamelCase__ )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
snake_case__ :str = latents.to(UpperCamelCase__ )
snake_case__ :Tuple = latents * scheduler.init_noise_sigma
return latents
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=None ,) -> str:
snake_case__ :Dict = len(UpperCamelCase__ ) if isinstance(UpperCamelCase__ ,UpperCamelCase__ ) else 1
# get prompt text embeddings
snake_case__ :Union[str, Any] = self.tokenizer(
UpperCamelCase__ ,padding="max_length" ,truncation=UpperCamelCase__ ,max_length=77 ,return_attention_mask=UpperCamelCase__ ,add_special_tokens=UpperCamelCase__ ,return_tensors="pt" ,)
snake_case__ :Optional[Any] = text_inputs.input_ids
snake_case__ :int = self.tokenizer(UpperCamelCase__ ,padding="longest" ,return_tensors="pt" ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(UpperCamelCase__ ,UpperCamelCase__ ):
snake_case__ :List[str] = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f' {self.tokenizer.model_max_length} tokens: {removed_text}' )
snake_case__ :Tuple = text_input_ids.to(UpperCamelCase__ )
snake_case__ :str = text_inputs.attention_mask.to(UpperCamelCase__ )
snake_case__ :Optional[int] = self.text_encoder(
input_ids=UpperCamelCase__ ,attention_mask=UpperCamelCase__ )
snake_case__ :List[str] = prompt_embeds.repeat_interleave(UpperCamelCase__ ,dim=0 )
snake_case__ :int = text_encoder_hidden_states.repeat_interleave(UpperCamelCase__ ,dim=0 )
snake_case__ :str = text_mask.repeat_interleave(UpperCamelCase__ ,dim=0 )
if do_classifier_free_guidance:
snake_case__ :List[str]
if negative_prompt is None:
snake_case__ :int = [''] * batch_size
elif type(UpperCamelCase__ ) is not type(UpperCamelCase__ ):
raise TypeError(
f'`negative_prompt` should be the same type to `prompt`, but got {type(UpperCamelCase__ )} !='
f' {type(UpperCamelCase__ )}.' )
elif isinstance(UpperCamelCase__ ,UpperCamelCase__ ):
snake_case__ :Union[str, Any] = [negative_prompt]
elif batch_size != len(UpperCamelCase__ ):
raise ValueError(
f'`negative_prompt`: {negative_prompt} has batch size {len(UpperCamelCase__ )}, but `prompt`:'
f' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
" the batch size of `prompt`." )
else:
snake_case__ :List[Any] = negative_prompt
snake_case__ :Dict = self.tokenizer(
UpperCamelCase__ ,padding="max_length" ,max_length=77 ,truncation=UpperCamelCase__ ,return_attention_mask=UpperCamelCase__ ,add_special_tokens=UpperCamelCase__ ,return_tensors="pt" ,)
snake_case__ :List[str] = uncond_input.input_ids.to(UpperCamelCase__ )
snake_case__ :Dict = uncond_input.attention_mask.to(UpperCamelCase__ )
snake_case__ :List[Any] = self.text_encoder(
input_ids=UpperCamelCase__ ,attention_mask=UpperCamelCase__ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
snake_case__ :List[Any] = negative_prompt_embeds.shape[1]
snake_case__ :str = negative_prompt_embeds.repeat(1 ,UpperCamelCase__ )
snake_case__ :str = negative_prompt_embeds.view(batch_size * num_images_per_prompt ,UpperCamelCase__ )
snake_case__ :Any = uncond_text_encoder_hidden_states.shape[1]
snake_case__ :Union[str, Any] = uncond_text_encoder_hidden_states.repeat(1 ,UpperCamelCase__ ,1 )
snake_case__ :int = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt ,UpperCamelCase__ ,-1 )
snake_case__ :str = uncond_text_mask.repeat_interleave(UpperCamelCase__ ,dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
snake_case__ :int = torch.cat([negative_prompt_embeds, prompt_embeds] )
snake_case__ :Any = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
snake_case__ :str = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def lowerCAmelCase_ ( self ,UpperCamelCase=0 ) -> int:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
snake_case__ :Any = torch.device(f'cuda:{gpu_id}' )
snake_case__ :Any = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(UpperCamelCase__ ,UpperCamelCase__ )
def lowerCAmelCase_ ( self ,UpperCamelCase=0 ) -> str:
if is_accelerate_available() and is_accelerate_version(">=" ,"0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
snake_case__ :Optional[Any] = torch.device(f'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to("cpu" ,silence_dtype_warnings=UpperCamelCase__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
snake_case__ :Optional[int] = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
snake_case__ :List[Any] = cpu_offload_with_hook(UpperCamelCase__ ,UpperCamelCase__ ,prev_module_hook=UpperCamelCase__ )
if self.safety_checker is not None:
snake_case__ :List[Any] = cpu_offload_with_hook(self.safety_checker ,UpperCamelCase__ ,prev_module_hook=UpperCamelCase__ )
# We'll offload the last model manually.
snake_case__ :List[str] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase_ ( self ) -> Any:
if not hasattr(self.unet ,"_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(UpperCamelCase__ ,"_hf_hook" )
and hasattr(module._hf_hook ,"execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(UpperCamelCase__ )
def __call__( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = None ,UpperCamelCase = 512 ,UpperCamelCase = 512 ,UpperCamelCase = 100 ,UpperCamelCase = 4.0 ,UpperCamelCase = 1 ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = "pil" ,UpperCamelCase = True ,) -> int:
if isinstance(UpperCamelCase__ ,UpperCamelCase__ ):
snake_case__ :Optional[int] = 1
elif isinstance(UpperCamelCase__ ,UpperCamelCase__ ):
snake_case__ :Union[str, Any] = len(UpperCamelCase__ )
else:
raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(UpperCamelCase__ )}' )
snake_case__ :str = self._execution_device
snake_case__ :str = batch_size * num_images_per_prompt
snake_case__ :int = guidance_scale > 1.0
snake_case__ :int = self._encode_prompt(
UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ )
if isinstance(UpperCamelCase__ ,UpperCamelCase__ ):
snake_case__ :Optional[Any] = torch.cat(UpperCamelCase__ ,dim=0 )
if isinstance(UpperCamelCase__ ,UpperCamelCase__ ):
snake_case__ :Optional[Any] = torch.cat(UpperCamelCase__ ,dim=0 )
if do_classifier_free_guidance:
snake_case__ :Dict = image_embeds.repeat_interleave(UpperCamelCase__ ,dim=0 )
snake_case__ :Optional[int] = negative_image_embeds.repeat_interleave(UpperCamelCase__ ,dim=0 )
snake_case__ :List[Any] = torch.cat([negative_image_embeds, image_embeds] ,dim=0 ).to(
dtype=prompt_embeds.dtype ,device=UpperCamelCase__ )
self.scheduler.set_timesteps(UpperCamelCase__ ,device=UpperCamelCase__ )
snake_case__ :List[Any] = self.scheduler.timesteps
snake_case__ :Any = self.unet.config.in_channels
snake_case__ :Optional[Any] = get_new_h_w(UpperCamelCase__ ,UpperCamelCase__ ,self.movq_scale_factor )
# create initial latent
snake_case__ :Tuple = self.prepare_latents(
(batch_size, num_channels_latents, height, width) ,text_encoder_hidden_states.dtype ,UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,self.scheduler ,)
for i, t in enumerate(self.progress_bar(UpperCamelCase__ ) ):
# expand the latents if we are doing classifier free guidance
snake_case__ :Any = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
snake_case__ :List[str] = {'text_embeds': prompt_embeds, 'image_embeds': image_embeds}
snake_case__ :str = self.unet(
sample=UpperCamelCase__ ,timestep=UpperCamelCase__ ,encoder_hidden_states=UpperCamelCase__ ,added_cond_kwargs=UpperCamelCase__ ,return_dict=UpperCamelCase__ ,)[0]
if do_classifier_free_guidance:
snake_case__ :int = noise_pred.split(latents.shape[1] ,dim=1 )
snake_case__ :Dict = noise_pred.chunk(2 )
snake_case__ :str = variance_pred.chunk(2 )
snake_case__ :Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
snake_case__ :Tuple = torch.cat([noise_pred, variance_pred_text] ,dim=1 )
if not (
hasattr(self.scheduler.config ,"variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
snake_case__ :Union[str, Any] = noise_pred.split(latents.shape[1] ,dim=1 )
# compute the previous noisy sample x_t -> x_t-1
snake_case__ :Any = self.scheduler.step(
UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,generator=UpperCamelCase__ ,).prev_sample
# post-processing
snake_case__ :int = self.movq.decode(UpperCamelCase__ ,force_not_quantize=UpperCamelCase__ )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
if output_type in ["np", "pil"]:
snake_case__ :Union[str, Any] = image * 0.5 + 0.5
snake_case__ :Optional[int] = image.clamp(0 ,1 )
snake_case__ :List[Any] = image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
if output_type == "pil":
snake_case__ :Tuple = self.numpy_to_pil(UpperCamelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase__ )
| 708
|
def lowercase_(n: int = 10_00) -> int:
    '''
    Return the sum of all natural numbers below `n` that are multiples of 3 or 5
    (Project Euler problem 1).

    Fixes: the parameter was previously named `__snake_case` while the body read
    an undefined `n`; the `elif a % 15 == 0` branch was unreachable (any multiple
    of 15 is already a multiple of 3) and has been removed.
    '''
    result = 0
    for a in range(3, n):
        if a % 3 == 0 or a % 5 == 0:
            result += a
    return result


if __name__ == "__main__":
    # The original printed an undefined name `solution`.
    print(f'{lowercase_() = }')
| 57
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__UpperCAmelCase : Tuple = logging.get_logger(__name__)
class _snake_case(BaseImageProcessor):  # NOTE(review): base `a__` was undefined; BaseImageProcessor (imported above) is the only plausible base — confirm
    r"""
    Image processor: optional BICUBIC resize (default 256x256), optional center
    crop (default 224x224), rescale by 1/255 and normalization with the
    ImageNet standard mean/std, producing a `BatchFeature` of "pixel_values".

    NOTE(review): helper method names were mangled to a single duplicate
    `lowerCAmelCase_`, which left the `self.resize`/`self.center_crop`/
    `self.rescale`/`self.normalize` calls in `preprocess` dangling — restored.
    `model_input_names` was mangled to `_A` and is restored per the
    transformers image-processor convention — confirm.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample=PIL.Image.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image, size, resample=PIL.Image.BICUBIC, data_format=None, **kwargs) -> np.ndarray:
        """Resize `image` to `size` (a dict with "height" and "width" keys)."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}')
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(self, image, size, data_format=None, **kwargs) -> np.ndarray:
        """Center-crop `image` to `size` (a dict with "height" and "width" keys)."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}')
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Multiply pixel values by `scale` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        """Normalize `image` channel-wise with `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Apply the configured transforms; per-call arguments override the instance defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        # Fixed precedence: `do_resize and size is None or resample is None`
        # raised even when resizing was disabled.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 709
|
import os
import sys
import unittest
# Resolve the repository root (three levels up from this test file) and make
# the shared repo utilities importable. The original bound the path to a
# mangled name while the lines below read `git_repo_path` — restored.
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402

# Align TRANSFORMERS_PATH in check_dummies with the current path
# NOTE(review): the original discarded this path into a mangled module-level
# name; pointing check_dummies at the local source tree matches the comment
# above — confirm the attribute name against utils/check_dummies.py.
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")
class _snake_case(unittest.TestCase):
    """Unit tests for the `check_dummies` repo utility.

    NOTE(review): all methods were mangled to one duplicate name
    `lowerCAmelCase_` (shadowing each other and invisible to unittest
    discovery), and results were asserted through undefined names; restored
    `test_*` names and real locals.
    """

    def test_find_backend(self) -> None:
        simple_backend = find_backend(" if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")
        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend(" if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")
        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self) -> None:
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self) -> None:
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n"
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self) -> None:
        expected_dummy_pytorch_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n"
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 57
| 0
|
import comet # From: unbabel-comet
import torch
import datasets
# The original rebound one mangled name four times, overwriting the logger,
# and left `_CITATION`/`_DESCRIPTION`/`_KWARGS_DESCRIPTION` — which the metric
# class below references — undefined. Restored distinct conventional names.
logger = datasets.logging.get_logger(__name__)

# BibTeX citation for the COMET metric.
_CITATION = "\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel's Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n"

# Human-readable description of the metric.
_DESCRIPTION = "\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n"

# Usage/arguments documentation injected into the metric docstring.
_KWARGS_DESCRIPTION = "\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric('comet')\n >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use\n >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [0.19, 0.92]\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _snake_case(datasets.Metric):
    """COMET machine-translation evaluation metric (wraps `unbabel-comet`).

    NOTE(review): method names restored to the datasets.Metric contract
    (`_info` / `_download_and_prepare` / `_compute`); the mangled duplicate
    names meant `datasets` could never invoke them.
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string", id="sequence"),
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        # The original bound the loaded model to a throwaway local while
        # `_compute` reads `self.scorer` — store it on the instance.
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        """Score `predictions` against `references` given `sources`; returns mean and per-sample scores."""
        if gpus is None:
            # Default to one GPU when CUDA is available, otherwise CPU.
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 710
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# Lazy-import scaffolding for the BARTpho tokenizer. The mangled original
# never defined `_import_structure` (read by `_LazyModule` below), never
# registered the tokenizer in it, and discarded the `_LazyModule` instead of
# installing it into `sys.modules` — restored the standard transformers
# lazy-module pattern.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 57
| 0
|
import sys
# 1000-digit number from Project Euler problem 8. The original bound it to a
# mangled name while the function default read an undefined `N` — restored.
N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def lowercase_(n: str = N) -> int:
    '''
    Return the greatest product of 13 adjacent digits of the digit string `n`
    (Project Euler problem 8).

    Raises:
        ValueError: if `n` has fewer than 13 digits (the original silently
        returned `-sys.maxsize - 1` in that case).
    '''
    if len(n) < 13:
        raise ValueError("n must contain at least 13 digits")
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product


if __name__ == "__main__":
    # The original printed an undefined name `solution`.
    print(f'{lowercase_() = }')
| 711
|
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _snake_case(unittest.TestCase):
    """Tokenizer loading behaviour: offline-cache fallback and legacy loads.

    NOTE(review): the mangled original rebound one local repeatedly (so the
    mock response was never configured) and passed undefined names into
    `mock.patch`/`http_get`; also all methods shared one duplicate name.
    Restored `test_*` names and real locals.
    """

    def test_cached_files_are_used_when_internet_is_down(self) -> None:
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()

    @require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_fast(self) -> None:
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = GPTaTokenizerFast.from_pretrained("gpt2")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = GPTaTokenizerFast.from_pretrained("gpt2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_one_file(self) -> None:
        # This test is for deprecated behavior and can be removed in v5
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, "wb") as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", f)
            _ = AlbertTokenizer.from_pretrained(tmp_file)
        finally:
            os.remove(tmp_file)

        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile("tokenizer.json"):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open("tokenizer.json", "wb") as f:
                http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", f)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size, 1_000)
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove("tokenizer.json")

    def test_legacy_load_from_url(self) -> None:
        # This test is for deprecated behavior and can be removed in v5
        _ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model")
@is_staging_test
class _snake_case(unittest.TestCase):
    """Staging-server integration tests for pushing tokenizers to the Hub.

    NOTE(review): the test bodies read `self.vocab_tokens` and `self._token`,
    but the mangled original named the class attribute `_A` and never set
    `cls._token`; also all methods shared one duplicate name. Restored.
    """

    vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']

    @classmethod
    def setUpClass(cls):
        # Authenticate once for the whole class; `_token` is read by the tests.
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        # Best-effort cleanup of any repos the tests created.
        try:
            delete_repo(token=cls._token, repo_id="test-tokenizer")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-tokenizer-org")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-tokenizer")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("test-tokenizer", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer')
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="test-tokenizer")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id="test-tokenizer", push_to_hub=True, use_auth_token=self._token)

        new_tokenizer = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer')
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    def test_push_to_hub_in_organization(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id="valid_org/test-tokenizer-org", push_to_hub=True, use_auth_token=self._token
            )

        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    @require_tokenizers
    def test_push_to_hub_dynamic_tokenizer(self):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        # No fast custom tokenizer
        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)
        tokenizer = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer', trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")

        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)
        tokenizer = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer', trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast")
        tokenizer = AutoTokenizer.from_pretrained(
            f'{USER}/test-dynamic-tokenizer', use_fast=False, trust_remote_code=True
        )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")
class _snake_case(unittest.TestCase):
    """Unit tests for `transformers.tokenization_utils.Trie`.

    NOTE(review): methods were mangled to one duplicate name (shadowing each
    other, invisible to unittest discovery) and the last test asserted via an
    undefined name; a stray no-op `trie.data` expression was removed.
    """

    def test_trie(self) -> None:
        trie = Trie()
        trie.add("Hello 友達")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}})
        trie.add("Hello")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}})

    def test_trie_split(self) -> None:
        trie = Trie()
        # With no tokens added, the text comes back whole.
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"])
        trie.add("[CLS]")
        trie.add("extra_id_1")
        trie.add("extra_id_100")
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"])

    def test_trie_single(self) -> None:
        trie = Trie()
        trie.add("A")
        self.assertEqual(trie.split("ABC"), ["A", "BC"])
        self.assertEqual(trie.split("BCA"), ["BC", "A"])

    def test_trie_final(self) -> None:
        trie = Trie()
        trie.add("TOKEN]")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_subtokens(self) -> None:
        trie = Trie()
        trie.add("A")
        trie.add("P")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_suffix_tokens(self) -> None:
        trie = Trie()
        trie.add("AB")
        trie.add("B")
        trie.add("C")
        self.assertEqual(trie.split("ABC"), ["AB", "C"])

    def test_trie_skip(self) -> None:
        trie = Trie()
        trie.add("ABC")
        trie.add("B")
        trie.add("CD")
        self.assertEqual(trie.split("ABCD"), ["ABC", "D"])

    def test_cut_text_hardening(self) -> None:
        # Even if the offsets are wrong, we necessarily output correct string
        # parts.
        trie = Trie()
        parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ["AB", "C"])
| 57
| 0
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
# The original bound both values to the same mangled name, so the logger was
# immediately overwritten by the archive map. Restored distinct names per the
# transformers configuration-module convention.
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}
class _snake_case(PretrainedConfig):  # NOTE(review): base `_UpperCamelCase` was undefined; PretrainedConfig (imported above) is the only plausible base — confirm
    r"""
    Configuration for a VAN (Visual Attention Network) model.

    The original `__init__` declared every parameter with one duplicate
    mangled name (a SyntaxError) and bound the values to throwaway locals
    instead of `self`; parameter names were reconstructed from the values the
    body reads. List defaults are kept as-is for interface compatibility with
    the upstream config (they are never mutated here).
    """

    # NOTE(review): mangled attribute `_A`; `PretrainedConfig` keys configs by
    # a `model_type` class attribute — confirm against the rest of the repo.
    model_type = 'van'

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1E-6,
        layer_scale_init_value=1E-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
| 712
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
# Fix: both constants were bound to the same mangled name, so the second
# assignment clobbered the first. Named per the upstream accelerate example.
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    """Build train/validation dataloaders for GLUE MRPC.

    Fixes over the original: the function is named ``get_dataloaders`` (the
    name its caller uses), the ``load_dataset`` result is bound to the
    variable that ``.map`` is called on (it previously referenced the
    undefined name ``datasets``), and the mangled keyword values are restored.

    Args:
        accelerator: used only to detect TPU (which needs fixed-length padding).
        batch_size: batch size shared by both dataloaders.
        model_name: tokenizer checkpoint identifier.

    Returns:
        ``(train_dataloader, eval_dataloader)``.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    """Run one evaluation pass and return the accuracy.

    Fixes over the original: the signature declared four parameters all named
    ``__snake_case`` (a SyntaxError: duplicate argument) and ``samples_seen``
    was read but never initialized; the function is named ``evaluation_loop``
    as its callers expect.
    """
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                # Drop the duplicated samples padded onto the final batch.
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(predictions=predictions, references=references)
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
    """Train BERT on MRPC with accelerate, checkpointing state every epoch.

    Fixes over the original: the signature declared both parameters as
    ``__snake_case`` (a SyntaxError: duplicate argument), every mangled local
    is restored to a real name, the per-epoch state-dict assignments use the
    keys the resume-time assertions check, and the function is named
    ``training_function`` as ``main`` expects.

    Args:
        config: dict with "lr", "num_epochs", "seed", "batch_size".
        args: parsed CLI namespace (model_name_or_path, output_dir,
            resume_from_checkpoint, partial_train_epoch, num_epochs).
    """
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name_or_path = args.model_name_or_path
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)

    # Instantiate optimizer: use the DeepSpeed dummy when the DS config owns the optimizer.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler (dummy when the DS config owns the scheduler).
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        # Checkpoint dirs are named "epoch_<n>"; recover <n> from the path.
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch-1}.json"), "r") as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        output_dir = os.path.join(args.output_dir, f"epoch_{epoch}")
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        # Keys must match the resume-time assertions above.
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)

        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)
def main():
    """Parse CLI arguments and launch training.

    Fixes over the original: the function is named ``main`` (the entry-point
    guard calls ``main()``, which was otherwise undefined) and argument types
    use real classes instead of mangled placeholders.
    """
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 57
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.