| code (string, lengths 81-54k) | code_codestyle (int64, 0-721) | style_context (string, lengths 91-41.9k) | style_context_codestyle (int64, 0-699) | label (int64, 0-1) |
|---|---|---|---|---|
'''simple docstring'''
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset ( Dataset ):
    """simple docstring"""
    def __init__( self , length = 1_01 ) -> None:
        self.length = length
    def __len__( self ) -> int:
        return self.length
    def __getitem__( self , i ) -> int:
        return i
class DummyDataCollator :
    """simple docstring"""
    def __call__( self , features ) -> Dict:
        return {"input_ids": torch.tensor(features ), "labels": torch.tensor(features )}
class DummyModel ( nn.Module ):
    """simple docstring"""
    def __init__( self ) -> None:
        super().__init__()
        # Add some (unused) params, otherwise DDP will complain.
        self.fc = nn.Linear(1_20 , 80 )
    def forward ( self , input_ids , labels=None ):
        if labels is not None:
            return torch.tensor(0.0 , device=input_ids.device ), input_ids
        else:
            return input_ids
class TestTrainerDistributedNeuronCore ( TestCasePlus ):
    """simple docstring"""
    @require_torch_neuroncore
    def test_trainer ( self ) -> None:
        distributed_args = F'''--nproc_per_node=2
        --master_port={get_torch_dist_unique_port()}
        {self.test_file_dir}/test_trainer_distributed.py
        '''.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = F'''--output_dir {output_dir}'''.split()
        cmd = ['torchrun'] + distributed_args + args
        execute_subprocess_async(cmd , env=self.get_env() )
        # a successful return here means success; any failure in the sub-process would have raised an exception in the call above
class TestTrainerDistributed ( TestCasePlus ):
    """simple docstring"""
    @require_torch_multi_gpu
    def test_trainer ( self ) -> None:
        distributed_args = F'''--nproc_per_node={torch.cuda.device_count()}
        --master_port={get_torch_dist_unique_port()}
        {self.test_file_dir}/test_trainer_distributed.py
        '''.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = F'''--output_dir {output_dir}'''.split()
        cmd = ['torchrun'] + distributed_args + args
        execute_subprocess_async(cmd , env=self.get_env() )
        # a successful return here means success; any failure in the sub-process would have raised an exception in the call above
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '''
F'''distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'''
)
    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order (this is crucial for prediction, for instance).
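    # For example, with 2 processes and a dataset of length 7, evaluation must gather the
    # per-process shards back into [0, 1, 2, 3, 4, 5, 6]; the lengths below (101, 40, 7)
    # deliberately exercise both evenly divisible and uneven/padded last batches.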
for dataset_length in [101, 40, 7]:
    dataset = DummyDataset(dataset_length)
    def compute_metrics ( p :EvalPrediction) -> Dict:
        sequential = list(range(len(dataset)))
        success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
        if not success and training_args.local_rank == 0:
            logger.warning(
                """Predictions and/or labels do not match expected results:\n  - predictions: """
                F'''{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}''')
        return {"success": success}
    trainer = Trainer(
        model=DummyModel(),
        args=training_args,
        data_collator=DummyDataCollator(),
        eval_dataset=dataset,
        compute_metrics=compute_metrics,
    )
    metrics = trainer.evaluate()
    logger.info(metrics)
    if metrics["eval_success"] is not True:
        logger.error(metrics)
        exit(1)
    p = trainer.predict(dataset)
    logger.info(p.metrics)
    if p.metrics["test_success"] is not True:
        logger.error(p.metrics)
        exit(1)
    trainer.args.eval_accumulation_steps = 2
    metrics = trainer.evaluate()
    logger.info(metrics)
    if metrics["eval_success"] is not True:
        logger.error(metrics)
        exit(1)
    p = trainer.predict(dataset)
    logger.info(p.metrics)
    if p.metrics["test_success"] is not True:
        logger.error(p.metrics)
        exit(1)
    trainer.args.eval_accumulation_steps = None
| 717 | import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory ( _ ):
    return EnvironmentCommand()
def download_command_factory ( args ):
    return EnvironmentCommand(args.accelerate_config_file)
class EnvironmentCommand ( BaseTransformersCLICommand ):
"""simple docstring"""
@staticmethod
    def register_subcommand ( parser ) -> None:
        download_parser = parser.add_parser("""env""" )
        download_parser.set_defaults(func=info_command_factory )
        download_parser.add_argument(
            """--accelerate-config_file""" , default=None , help="""The accelerate config file to use for the default values in the launching script.""" , )
        download_parser.set_defaults(func=download_command_factory )
    def __init__( self , accelerate_config_file , *args ) -> None:
        self._accelerate_config_file = accelerate_config_file
    def run ( self ) -> dict:
        safetensors_version = """not installed"""
        if is_safetensors_available():
            import safetensors
            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("""safetensors""" ) is not None:
            import safetensors
            safetensors_version = F'''{safetensors.__version__} but is ignored because the installed PyTorch version is too old.'''
        accelerate_version = """not installed"""
        accelerate_config = accelerate_config_str = """not found"""
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file ):
                accelerate_config = load_config_from_file(self._accelerate_config_file ).to_dict()
            accelerate_config_str = (
                """\n""".join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
                if isinstance(accelerate_config , dict )
                else F'''\t{accelerate_config}'''
            )
_A = """not installed"""
_A = """NA"""
if is_torch_available():
import torch
_A = torch.__version__
_A = torch.cuda.is_available()
_A = """not installed"""
_A = """NA"""
if is_tf_available():
import tensorflow as tf
_A = tf.__version__
try:
# deprecated in v2.1
_A = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
_A = bool(tf.config.list_physical_devices("""GPU""" ) )
_A = """not installed"""
_A = """not installed"""
_A = """not installed"""
_A = """NA"""
if is_flax_available():
import flax
import jax
import jaxlib
_A = flax.__version__
_A = jax.__version__
_A = jaxlib.__version__
_A = jax.lib.xla_bridge.get_backend().platform
        info = {
"""`transformers` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""Huggingface_hub version""": huggingface_hub.__version__,
"""Safetensors version""": F'''{safetensors_version}''',
"""Accelerate version""": F'''{accelerate_version}''',
"""Accelerate config""": F'''{accelerate_config_str}''',
"""PyTorch version (GPU?)""": F'''{pt_version} ({pt_cuda_available})''',
"""Tensorflow version (GPU?)""": F'''{tf_version} ({tf_cuda_available})''',
"""Flax version (CPU?/GPU?/TPU?)""": F'''{flax_version} ({jax_backend})''',
"""Jax version""": F'''{jax_version}''',
"""JaxLib version""": F'''{jaxlib_version}''',
"""Using GPU in script?""": """<fill in>""",
"""Using distributed or parallel set-up in script?""": """<fill in>""",
}
print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" )
        print(self.format_dict(info ) )
return info
@staticmethod
    def format_dict ( d ) -> str:
        return "\n".join([F'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
| 83 | 0 |
from ...processing_utils import ProcessorMixin
class TvltProcessor ( ProcessorMixin ):
    """simple docstring"""
    attributes = ['''image_processor''', '''feature_extractor''']
    image_processor_class = '''TvltImageProcessor'''
    feature_extractor_class = '''TvltFeatureExtractor'''
    def __init__( self , image_processor , feature_extractor ) -> None:
        super().__init__(image_processor=image_processor , feature_extractor=feature_extractor )
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor
    def __call__( self , images=None , audio=None , images_mixed=None , sampling_rate=None , mask_audio=False , mask_pixel=False , *args , **kwargs , ):
        if images is None and audio is None:
            raise ValueError("""You need to specify either an `images` or `audio` input to process.""" )
        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images , mask_pixel=mask_pixel , *args , **kwargs )
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed , is_mixed=True , *args , **kwargs )
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio , *args , sampling_rate=sampling_rate , mask_audio=mask_audio , **kwargs )
        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict )
        if images is not None:
            output_dict.update(images_dict )
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict )
        return output_dict
return output_dict
@property
    def model_input_names ( self ):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
| 718 | import colorsys
from PIL import Image # type: ignore
def get_distance ( x :float , y :float , max_step :int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence is guaranteed once the absolute value exceeds 2,
        # i.e. once the squared magnitude a * a + b * b exceeds 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
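# Quick sanity check for the escape-time function above (a sketch, assuming the fixed
# (x, y, max_step) signature): the origin never diverges, so the full iteration budget
# is used; a point far outside the set diverges on the first step.
# >>> get_distance(0, 0, 50)
# 1.0
# >>> get_distance(2, 2, 50)
# 0.0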
def get_black_and_white_rgb ( distance :float) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return (255, 255, 255)
def get_color_coded_rgb ( distance :float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance , 1 , 1))
def get_image ( image_width :int = 800 , image_height :int = 600 , figure_center_x :float = -0.6 , figure_center_y :float = 0 , figure_width :float = 3.2 , max_step :int = 50 , use_distance_color_coding :bool = True , ) -> Image.Image:
    img = Image.new("""RGB""" , (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x , figure_y , max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 83 | 0 |
def largest_square_area_in_matrix_top_down ( rows :int , cols :int , mat :list[list[int]]) -> int:
    def update_area_of_max_square(row :int , col :int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0
        right = update_area_of_max_square(row , col + 1)
        diagonal = update_area_of_max_square(row + 1 , col + 1)
        down = update_area_of_max_square(row + 1 , col)
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0] , sub_problem_sol)
return sub_problem_sol
else:
return 0
    largest_square_area = [0]
update_area_of_max_square(0 , 0)
return largest_square_area[0]
def largest_square_area_in_matrix_top_down_with_memo ( rows :int , cols :int , mat :list[list[int]]) -> int:
    def update_area_of_max_square_using_dp_array(
        row :int , col :int , dp_array :list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]
        right = update_area_of_max_square_using_dp_array(row , col + 1 , dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1 , col , dp_array)
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0] , sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
return sub_problem_sol
else:
return 0
    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0 , 0 , dp_array)
return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up ( rows :int , cols :int , mat :list[list[int]]) -> int:
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1 , -1 , -1):
        for col in range(cols - 1 , -1 , -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right , diagonal , bottom)
                largest_square_area = max(dp_array[row][col] , largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area
def largest_square_area_in_matrix_bottom_up_space_optimization ( rows :int , cols :int , mat :list[list[int]]) -> int:
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1 , -1 , -1):
        for col in range(cols - 1 , -1 , -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right , diagonal , bottom)
                largest_square_area = max(current_row[col] , largest_square_area)
            else:
                current_row[col] = 0
        next_row = current_row
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
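# A small worked example (illustrative, not from the original file): in the matrix below
# the largest all-ones square has side length 2, so each variant should return 2.
# mat = [[1, 1, 0], [1, 1, 0], [0, 0, 1]]
# largest_square_area_in_matrix_bottom_up(3, 3, mat) -> 2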
| 719 | import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n'
_SCREAMING_SNAKE_CASE = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n'
_SCREAMING_SNAKE_CASE = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n'
def get_coref_infos ( key_lines , sys_lines , NP_only=False , remove_nested=False , keep_singletons=True , min_span=False , doc="dummy_doc") -> dict:
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0
    key_clusters , singletons_num = reader.get_doc_mentions(doc , key_doc_lines[doc] , keep_singletons)
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters , key_doc_lines[doc] , NP_only , min_span)
    sys_clusters , singletons_num = reader.get_doc_mentions(doc , sys_doc_lines[doc] , keep_singletons)
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters , key_doc_lines[doc] , NP_only , min_span)
    if remove_nested:
        nested_mentions , removed_clusters = reader.remove_nested_coref_mentions(key_clusters , keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        nested_mentions , removed_clusters = reader.remove_nested_coref_mentions(sys_clusters , keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters , key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters , sys_clusters)
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"""Number of removed nested coreferring mentions in the key """
F'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''')
logger.info(
"""Number of resulting singleton clusters in the key """
F'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''')
if not keep_singletons:
logger.info(
F'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '''
"""files, respectively""")
return doc_coref_infos
def evaluate ( key_lines , sys_lines , metrics , NP_only , remove_nested , keep_singletons , min_span) -> dict:
    doc_coref_infos = get_coref_infos(key_lines , sys_lines , NP_only , remove_nested , keep_singletons , min_span)
    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall , precision , fa = evaluator.evaluate_documents(doc_coref_infos , metric , beta=1)
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F'''{name}/recall''': recall, F'''{name}/precision''': precision, F'''{name}/f1''': fa})
logger.info(
name.ljust(10) , F'''Recall: {recall * 100:.2f}''' , F''' Precision: {precision * 100:.2f}''' , F''' F1: {fa * 100:.2f}''' , )
if conll_subparts_num == 3:
        conll = (conll / 3) * 100
logger.info(F'''CoNLL score: {conll:.2f}''')
output_scores.update({"""conll_score""": conll})
return output_scores
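# Illustrative sketch (not part of the metric itself): the CoNLL score computed above is
# just the mean of the MUC, B-cubed and CEAFe F1 values, rescaled to a percentage.
def _conll_score_sketch(muc_fa: float, bcub_fa: float, ceafe_fa: float) -> float:
    # mirrors `conll += fa` for the three metrics followed by `(conll / 3) * 100`
    return (muc_fa + bcub_fa + ceafe_fa) / 3 * 100
# _conll_score_sketch(0.8, 0.7, 0.6) -> 70.0 (up to floating point)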
def check_gold_parse_annotation ( key_lines ) -> bool:
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("""#"""):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
"""simple docstring"""
    def _info ( self ) -> datasets.MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Sequence(datasets.Value("""string""" ) ),
} ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[
"""https://github.com/ns-moosavi/coval""",
"""https://www.aclweb.org/anthology/P16-1060""",
"""http://www.conll.cemantix.org/2012/data.html""",
] , )
    def _compute ( self , predictions , references , keep_singletons=True , NP_only=False , min_span=False , remove_nested=False ) -> dict:
        metrics = [
("""mentions""", evaluator.mentions),
("""muc""", evaluator.muc),
("""bcub""", evaluator.b_cubed),
("""ceafe""", evaluator.ceafe),
("""lea""", evaluator.lea),
]
        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references )
if not has_gold_parse:
raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
        score = evaluate(
            key_lines=references , sys_lines=predictions , metrics=metrics , NP_only=NP_only , remove_nested=remove_nested , keep_singletons=keep_singletons , min_span=min_span , )
return score
| 83 | 0 |
import argparse
import struct
import unittest
class SHAaaa :
    """simple docstring"""
    def __init__( self , data ) -> None:
        self.data = data
        # Initialize hash values
        self.hashes = [
0x6a_09e_667,
0xbb_67a_e85,
0x3c_6ef_372,
0xa5_4ff_53a,
0x51_0e5_27f,
0x9b_056_88c,
0x1f_83d_9ab,
0x5b_e0c_d19,
]
# Initialize round constants
        self.round_constants = [
0x42_8a2_f98,
0x71_374_491,
0xb5_c0f_bcf,
0xe9_b5d_ba5,
0x39_56c_25b,
0x59_f11_1f1,
0x92_3f8_2a4,
0xab_1c5_ed5,
0xd8_07a_a98,
0x12_835_b01,
0x24_318_5be,
0x55_0c7_dc3,
0x72_be5_d74,
0x80_deb_1fe,
0x9b_dc0_6a7,
0xc1_9bf_174,
0xe4_9b6_9c1,
0xef_be4_786,
0x0f_c19_dc6,
0x24_0ca_1cc,
0x2d_e92_c6f,
0x4a_748_4aa,
0x5c_b0a_9dc,
0x76_f98_8da,
0x98_3e5_152,
0xa8_31c_66d,
0xb0_032_7c8,
0xbf_597_fc7,
0xc6_e00_bf3,
0xd5_a79_147,
0x06_ca6_351,
0x14_292_967,
0x27_b70_a85,
0x2e_1b2_138,
0x4d_2c6_dfc,
0x53_380_d13,
0x65_0a7_354,
0x76_6a0_abb,
0x81_c2c_92e,
0x92_722_c85,
0xa2_bfe_8a1,
0xa8_1a6_64b,
0xc2_4b8_b70,
0xc7_6c5_1a3,
0xd1_92e_819,
0xd6_990_624,
0xf4_0e3_585,
0x10_6aa_070,
0x19_a4c_116,
0x1e_376_c08,
0x27_487_74c,
0x34_b0b_cb5,
0x39_1c0_cb3,
0x4e_d8a_a4a,
0x5b_9cc_a4f,
0x68_2e6_ff3,
0x74_8f8_2ee,
0x78_a56_36f,
0x84_c87_814,
0x8c_c70_208,
0x90_bef_ffa,
0xa4_506_ceb,
0xbe_f9a_3f7,
0xc6_717_8f2,
]
        self.preprocessed_data = self.preprocessing(self.data )
self.final_hash()
@staticmethod
    def preprocessing ( data ) -> bytes:
        padding = b"""\x80""" + (b"""\x00""" * (63 - (len(data ) + 8) % 64))
        big_endian_integer = struct.pack(""">Q""" , (len(data ) * 8) )
        return data + padding + big_endian_integer
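        # Worked example for the padding above (illustrative): an 11-byte message gets
        # 63 - (11 + 8) % 64 = 44 zero bytes after the 0x80 marker, so the padded length
        # is 11 + 1 + 44 + 8 = 64 bytes -- exactly one SHA-256 block.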
    def final_hash ( self ) -> None:
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0 , len(self.preprocessed_data ) , 64 )
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(""">16L""" , block ) )
            # extend with 48 zero-initialized words; the SHA-256 message schedule has 64 words in total
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
for index in range(0 , 64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
                    sa = (
                        self.ror(words[index - 15] , 7 )
                        ^ self.ror(words[index - 15] , 18 )
                        ^ (words[index - 15] >> 3)
                    )
                    sb = (
                        self.ror(words[index - 2] , 17 )
                        ^ self.ror(words[index - 2] , 19 )
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + sa + words[index - 7] + sb
                    ) % 0x100_000_000
# Compression
                sa = self.ror(e , 6 ) ^ self.ror(e , 11 ) ^ self.ror(e , 25 )
                ch = (e & f) ^ ((~e & 0xff_fff_fff) & g)
                tempa = (
                    h + sa + ch + self.round_constants[index] + words[index]
                ) % 0x100_000_000
                sa = self.ror(a , 2 ) ^ self.ror(a , 13 ) ^ self.ror(a , 22 )
                maj = (a & b) ^ (a & c) ^ (b & c)
                tempb = (sa + maj) % 0x100_000_000
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + tempa) % 0x100_000_000),
                    c,
                    b,
                    a,
                    ((tempa + tempb) % 0x100_000_000),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100_000_000)
                for index, element in enumerate(self.hashes )
            ]
        self.hash = """""".join([hex(value )[2:].zfill(8 ) for value in self.hashes] )
    def ror ( self , value , rotations ) -> int:
        return 0xff_fff_fff & (value << (32 - rotations)) | (value >> rotations)
class SHAaaaTest ( unittest.TestCase ):
    """simple docstring"""
    def test_match_hashes ( self ) -> None:
        import hashlib
        msg = bytes("""Test String""" , """utf-8""" )
        self.assertEqual(SHAaaa(msg ).hash , hashlib.sha256(msg ).hexdigest() )
def main ( ) -> None:
    import doctest
    doctest.testmod()
    parser = argparse.ArgumentParser()
parser.add_argument(
"""-s""" , """--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , )
parser.add_argument(
"""-f""" , """--file""" , dest="""input_file""" , help="""Hash contents of a file""")
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , """rb""") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string , """utf-8""")
    print(SHAaaa(hash_input).hash)
if __name__ == "__main__":
main()
| 720 | import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'facebook/blenderbot_small-90M': 512}
def get_pairs ( word :tuple) -> set:
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
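# Example of the helper above (illustrative): the adjacent symbol pairs of a word that
# has already been split into characters.
# >>> sorted(get_pairs(("h", "e", "l", "l", "o")))
# [('e', 'l'), ('h', 'e'), ('l', 'l'), ('l', 'o')]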
class BlenderbotSmallTokenizer ( PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="__start__" , lowerCAmelCase_="__end__" , lowerCAmelCase_="__unk__" , lowerCAmelCase_="__null__" , **lowerCAmelCase_ , ) -> int:
super().__init__(unk_token=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , **lowerCAmelCase_ )
with open(lowerCAmelCase_ , encoding="""utf-8""" ) as vocab_handle:
_A = json.load(lowerCAmelCase_ )
_A = {v: k for k, v in self.encoder.items()}
with open(lowerCAmelCase_ , encoding="""utf-8""" ) as merges_handle:
_A = merges_handle.read().split("""\n""" )[1:-1]
_A = [tuple(merge.split() ) for merge in merges]
_A = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
_A = {}
@property
    def vocab_size ( self ) -> int:
        return len(self.encoder )
    def get_vocab ( self ) -> Dict:
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe ( self , token ) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("""([.,!?()])""" , r""" \1""" , token )
        token = re.sub("""(')""" , r""" \1 """ , token )
        token = re.sub(r"""\s{2,}""" , """ """ , token )
        if "\n" in token:
            token = token.replace("""\n""" , """ __newln__""" )
        tokens = token.split(""" """ )
        words = []
        for token in tokens:
            if not len(token ):
                continue
            token = token.lower()
            word = tuple(token )
            word = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
            pairs = get_pairs(word )
            if not pairs:
                words.append(token )
                continue
            while True:
                bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("""inf""" ) ) )
                if bigram not in self.bpe_ranks:
                    break
                first , second = bigram
                new_word = []
                i = 0
                while i < len(word ):
                    try:
                        j = word.index(first , i )
                        new_word.extend(word[i:j] )
                        i = j
                    except ValueError:
                        new_word.extend(word[i:] )
                        break
                    if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                        new_word.append(first + second )
                        i += 2
                    else:
                        new_word.append(word[i] )
                        i += 1
                new_word = tuple(new_word )
                word = new_word
                if len(word ) == 1:
                    break
                else:
                    pairs = get_pairs(word )
            word = """@@ """.join(word )
            word = word[:-4]
            self.cache[token] = word
            words.append(word )
        return " ".join(words )
    def _tokenize ( self , text ) -> List[str]:
        split_tokens = []
        words = re.findall(r"""\S+\n?""" , text )
        for token in words:
            split_tokens.extend(list(self.bpe(token ).split(""" """ ) ) )
        return split_tokens
    def _convert_token_to_id ( self , token ) -> int:
        token = token.lower()
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token ( self , index ) -> str:
        return self.decoder.get(index , self.unk_token )
    def convert_tokens_to_string ( self , tokens ) -> str:
        out_string = """ """.join(tokens ).replace("""@@ """ , """""" ).strip()
        return out_string
    def save_vocabulary ( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
        with open(vocab_file , """w""" , encoding="""utf-8""" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + """\n""" )
        index = 0
        with open(merge_file , """w""" , encoding="""utf-8""" ) as writer:
            writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        """ Please check that the tokenizer is not corrupted!""" )
                    index = token_index
                writer.write(""" """.join(bpe_tokens ) + """\n""" )
                index += 1
return vocab_file, merge_file
| 83 | 0 |
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), 'src')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
'torch',
'numpy',
'tokenizers',
'filelock',
'requests',
'tqdm',
'regex',
'sentencepiece',
'sacremoses',
'importlib_metadata',
'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__)
def config ( *args , **kwargs):
    return AutoConfig.from_pretrained(*args , **kwargs)
@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer ( *args , **kwargs):
    return AutoTokenizer.from_pretrained(*args , **kwargs)
@add_start_docstrings(AutoModel.__doc__)
def model ( *args , **kwargs):
    return AutoModel.from_pretrained(*args , **kwargs)
@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM ( *args , **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args , **kwargs)
@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM ( *args , **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args , **kwargs)
@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification ( *args , **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args , **kwargs)
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering ( *args , **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args , **kwargs)
| 721 | MORSE_CODE_DICT = {
'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.', 'G': '--.',
'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..', 'M': '--', 'N': '-.',
'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.', 'S': '...', 'T': '-', 'U': '..-',
'V': '...-', 'W': '.--', 'X': '-..-', 'Y': '-.--', 'Z': '--..', '1': '.----',
'2': '..---', '3': '...--', '4': '....-', '5': '.....', '6': '-....', '7': '--...',
'8': '---..', '9': '----.', '0': '-----', '&': '.-...', '@': '.--.-.',
':': '---...', ',': '--..--', '.': '.-.-.-', '\'': '.----.', '"': '.-..-.',
'?': '..--..', '/': '-..-.', '=': '-...-', '+': '.-.-.', '-': '-....-',
'(': '-.--.', ')': '-.--.-', '!': '-.-.--', ' ': '/'
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt ( message :str) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())
def decrypt ( message :str) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split())
def main ( ) -> None:
    message = """Morse code here!"""
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
if __name__ == "__main__":
main()
| 83 | 0 |
'''simple docstring'''
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
_SCREAMING_SNAKE_CASE = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively ( hf_pointer , key , value , full_name , weight_type) -> None:
    for attribute in key.split("""."""):
        hf_pointer = getattr(hf_pointer , attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
        F''' {value.shape} for {full_name}'''
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''')
def recursively_load_weights_wavaveca ( fairseq_model , hf_model) -> None:
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter
for name, value in fairseq_dict.items():
_A = False
if "conv_layers" in name:
load_conv_layer(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == """group""" , )
_A = True
elif any(x in name for x in ["""adaptor""", """w2v_encoder.proj.""", """w2v_proj_ln."""]):
load_adapter(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
_A = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""")[-1] == name.split(""".""")[0]:
_A = True
if "*" in mapped_key:
_A = name.split(_lowerCamelCase)[0].split(""".""")[-2]
_A = mapped_key.replace("""*""" , _lowerCamelCase)
if "weight_g" in name:
_A = """weight_g"""
elif "weight_v" in name:
_A = """weight_v"""
elif "bias" in name:
_A = """bias"""
elif "weight" in name:
_A = """weight"""
else:
_A = None
set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
continue
if not is_used:
unused_weights.append(_lowerCamelCase)
logger.warning(F'''Unused weights: {unused_weights}''')
def load_conv_layer ( full_name , value , feature_extractor , unused_weights , use_group_norm) -> None:
    name = full_name.split("""conv_layers.""")[-1]
    items = name.split(""".""")
    layer_id = int(items[0])
    type_id = int(items[1])
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
_A = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
_A = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
_A = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
_A = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
else:
unused_weights.append(_lowerCamelCase)
def load_adapter ( full_name , value , adapter , unused_weights) -> None:
    name = full_name.split("""adaptor.""")[-1]
    items = name.split(""".""")
    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'''
_A = value
logger.info(F'''Adapter proj layer norm bias was initialized from {full_name}.''')
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'''
_A = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'''
_A = value
logger.info(F'''Adapter proj layer bias was initialized from {full_name}.''')
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'''
_A = value
logger.info(F'''Adapter proj layer weight was initialized from {full_name}.''')
elif isinstance(_lowerCamelCase , _lowerCamelCase):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'''
_A = value
logger.info(F'''Adapter layer {layer_id} bias was initialized from {full_name}.''')
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'''
_A = value
logger.info(F'''Adapter layer {layer_id} bias was initialized from {full_name}.''')
else:
unused_weights.append(_lowerCamelCase)
def make_linear_from_emb ( emb) -> nn.Linear:
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
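# Illustrative sketch of the helper above: the decoder's output projection is created by
# copying the embedding matrix into a bias-free linear layer, i.e. weight tying. The
# shapes here are made up for the example.
# emb = nn.Embedding(10, 4)
# lin = make_linear_from_emb(emb)
# assert lin.weight.shape == emb.weight.shape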
@torch.no_grad()
def convert_wavaveca_checkpoint ( checkpoint_path , pytorch_dump_folder_path , dict_path , config_yaml_path , encoder_config_path , decoder_config_path , add_adapter , adapter_kernel_size , adapter_stride , decoder_start_token_id , encoder_output_dim , ) -> None:
_A = WavaVecaConfig.from_pretrained(
_lowerCamelCase , add_adapter=_lowerCamelCase , adapter_stride=_lowerCamelCase , adapter_kernel_size=_lowerCamelCase , use_auth_token=_lowerCamelCase , output_hidden_size=_lowerCamelCase , )
_A = MBartConfig.from_pretrained(_lowerCamelCase)
# load model
_A , _A , _A = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
"""config_yaml""": config_yaml_path,
"""data""": """/""".join(dict_path.split("""/""")[:-1]),
"""w2v_path""": checkpoint_path,
"""load_pretrained_decoder_from""": None,
} , )
_A = model[0].eval()
# load feature extractor
_A = WavaVecaFeatureExtractor.from_pretrained(_lowerCamelCase , use_auth_token=_lowerCamelCase)
# set weights for wav2vec2 encoder
_A = WavaVecaModel(_lowerCamelCase)
recursively_load_weights_wavaveca(model.encoder , _lowerCamelCase)
# load decoder weights
_A = MBartForCausalLM(_lowerCamelCase)
_A , _A = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=_lowerCamelCase)
logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''')
logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''')
_A = SpeechEncoderDecoderModel(encoder=_lowerCamelCase , decoder=_lowerCamelCase)
_A = False
_A = MBartaaTokenizer(_lowerCamelCase)
tokenizer.save_pretrained(_lowerCamelCase)
_A = hf_wavavec.config.to_dict()
_A = tokenizer.pad_token_id
_A = tokenizer.bos_token_id
_A = tokenizer.eos_token_id
_A = """mbart50"""
_A = """wav2vec2"""
_A = tokenizer.eos_token_id
_A = 250_004
_A = tokenizer.eos_token_id
_A = SpeechEncoderDecoderConfig.from_dict(_lowerCamelCase)
hf_wavavec.save_pretrained(_lowerCamelCase)
feature_extractor.save_pretrained(_lowerCamelCase)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_yaml_path', default=None, type=str, help='Path to yaml file of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-xls-r-1b',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/mbart-large-50-one-to-many-mmt',
type=str,
help='Path to hf decoder checkpoint config',
)
    parser.add_argument('--add_adapter', default=True, type=bool, help='whether to add model adapter layers')
parser.add_argument('--adapter_stride', default=2, type=int, help='stride of adapter layers')
parser.add_argument('--adapter_kernel_size', default=3, type=int, help='kernel size of adapter layers')
parser.add_argument('--encoder_output_dim', default=1_024, type=int, help='encoder output dim')
parser.add_argument('--start_token_id', default=250_004, type=int, help='`decoder_start_token_id` of model config')
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 700 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_jukebox': [
'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP',
'JukeboxConfig',
'JukeboxPriorConfig',
'JukeboxVQVAEConfig',
],
'tokenization_jukebox': ['JukeboxTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_jukebox'] = [
'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'JukeboxModel',
'JukeboxPreTrainedModel',
'JukeboxVQVAE',
'JukeboxPrior',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 83 | 0 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process ( position , value , l_send , r_send , lr_cv , rr_cv , result_pipe) -> None:
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
    for i in range(0 , 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value , temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value , temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition ( arr :list) -> list:
    process_array_ = []
    result_pipe = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe())
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ))
    temp_ls = temp_rs
    temp_lr = temp_rr
    for i in range(1 , len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ))
        temp_ls = temp_rs
        temp_lr = temp_rr
    process_array_.append(
        Process(
            target=oe_process , args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ) , ))
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
    for p in range(0 , len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
process_array_[p].join()
return arr
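# For reference, a sequential odd-even transposition sort (a sketch, not part of the
# parallel implementation above); it performs the same compare-and-swap schedule in a
# single process and is handy for checking results.
def odd_even_transposition_sequential(arr: list) -> list:
    for phase in range(len(arr)):
        # even phases compare pairs (0,1), (2,3), ...; odd phases compare (1,2), (3,4), ...
        for i in range(phase % 2, len(arr) - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
# odd_even_transposition_sequential([3, 1, 2]) -> [1, 2, 3]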
def main ( ) -> None:
    arr = list(range(10 , 0 , -1))
    print("""Initial List""")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("""Sorted List\n""")
    print(*arr)
if __name__ == "__main__":
main()
| 701 | # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class TextSummarizationTool ( PipelineTool ):
    """simple docstring"""
    default_checkpoint = '''philschmid/bart-large-cnn-samsum'''
    description = (
        '''This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '''
        '''and returns a summary of the text.'''
    )
    name = '''summarizer'''
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM
    inputs = ['''text''']
    outputs = ['''text''']
    def encode ( self , text ):
        return self.pre_processor(text , return_tensors="""pt""" , truncation=True )
    def forward ( self , inputs ):
        return self.model.generate(**inputs )[0]
    def decode ( self , outputs ):
        return self.pre_processor.decode(outputs , skip_special_tokens=True , clean_up_tokenization_spaces=True )
| 83 | 0 |
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
_SCREAMING_SNAKE_CASE = '\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")\n >>> pipe.to("cuda")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save("cat.png")\n ```\n'
def get_new_h_w(h: int, w: int, scale_factor: int = 8) -> tuple:
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
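A quick numeric check of the rounding behaviour (illustrative values added for this edit): with the default scale_factor of 8 the helper rounds h // 64 up to the next integer and scales back by 8.

assert get_new_h_w(768, 768) == (96, 96)  # 768 is divisible by 8**2, no rounding needed
assert get_new_h_w(700, 700) == (88, 88)  # 700 // 64 == 10 with a remainder, rounded up to 11 -> 88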
class a ( _UpperCAmelCase ):
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ) -> str:
super().__init__()
self.register_modules(
text_encoder=lowercase__ , tokenizer=lowercase__ , unet=lowercase__ , scheduler=lowercase__ , movq=lowercase__ , )
_A = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[Any]:
if latents is None:
_A = randn_tensor(lowercase__ , generator=lowercase__ , device=lowercase__ , dtype=lowercase__ )
else:
if latents.shape != shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
_A = latents.to(lowercase__ )
_A = latents * scheduler.init_noise_sigma
return latents
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , ) -> List[str]:
_A = len(lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else 1
# get prompt text embeddings
_A = self.tokenizer(
lowercase__ , padding="""max_length""" , truncation=lowercase__ , max_length=77 , return_attention_mask=lowercase__ , add_special_tokens=lowercase__ , return_tensors="""pt""" , )
_A = text_inputs.input_ids
_A = self.tokenizer(lowercase__ , padding="""longest""" , return_tensors="""pt""" ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(lowercase__ , lowercase__ ):
_A = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
_A = text_input_ids.to(lowercase__ )
_A = text_inputs.attention_mask.to(lowercase__ )
_A = self.text_encoder(
input_ids=lowercase__ , attention_mask=lowercase__ )
_A = prompt_embeds.repeat_interleave(lowercase__ , dim=0 )
_A = text_encoder_hidden_states.repeat_interleave(lowercase__ , dim=0 )
_A = text_mask.repeat_interleave(lowercase__ , dim=0 )
if do_classifier_free_guidance:
_A = 42
if negative_prompt is None:
_A = [""] * batch_size
elif type(lowercase__ ) is not type(lowercase__ ):
raise TypeError(
F'''`negative_prompt` should be the same type to `prompt`, but got {type(lowercase__ )} !='''
F''' {type(lowercase__ )}.''' )
elif isinstance(lowercase__ , lowercase__ ):
_A = [negative_prompt]
elif batch_size != len(lowercase__ ):
raise ValueError(
F'''`negative_prompt`: {negative_prompt} has batch size {len(lowercase__ )}, but `prompt`:'''
F''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
""" the batch size of `prompt`.""" )
else:
_A = negative_prompt
_A = self.tokenizer(
lowercase__ , padding="""max_length""" , max_length=77 , truncation=lowercase__ , return_attention_mask=lowercase__ , add_special_tokens=lowercase__ , return_tensors="""pt""" , )
_A = uncond_input.input_ids.to(lowercase__ )
_A = uncond_input.attention_mask.to(lowercase__ )
_A = self.text_encoder(
input_ids=lowercase__ , attention_mask=lowercase__ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
_A = negative_prompt_embeds.shape[1]
_A = negative_prompt_embeds.repeat(1 , lowercase__ )
_A = negative_prompt_embeds.view(batch_size * num_images_per_prompt , lowercase__ )
_A = uncond_text_encoder_hidden_states.shape[1]
_A = uncond_text_encoder_hidden_states.repeat(1 , lowercase__ , 1 )
_A = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , lowercase__ , -1 )
_A = uncond_text_mask.repeat_interleave(lowercase__ , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_A = torch.cat([negative_prompt_embeds, prompt_embeds] )
_A = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
_A = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def UpperCAmelCase ( self , lowerCAmelCase_=0 ) -> List[Any]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
_A = torch.device(F'''cuda:{gpu_id}''' )
_A = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowercase__ , lowercase__ )
def UpperCAmelCase ( self , lowerCAmelCase_=0 ) -> List[str]:
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
_A = torch.device(F'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=lowercase__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_A = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
_A = cpu_offload_with_hook(lowercase__ , lowercase__ , prev_module_hook=lowercase__ )
if self.safety_checker is not None:
_A = cpu_offload_with_hook(self.safety_checker , lowercase__ , prev_module_hook=lowercase__ )
# We'll offload the last model manually.
_A = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCAmelCase ( self ) -> int:
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase__ , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(lowercase__ )
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = 5_12 , lowerCAmelCase_ = 5_12 , lowerCAmelCase_ = 1_00 , lowerCAmelCase_ = 4.0 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = "pil" , lowerCAmelCase_ = True , ) -> Tuple:
if isinstance(lowercase__ , lowercase__ ):
_A = 1
elif isinstance(lowercase__ , lowercase__ ):
_A = len(lowercase__ )
else:
raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(lowercase__ )}''' )
_A = self._execution_device
_A = batch_size * num_images_per_prompt
_A = guidance_scale > 1.0
_A = self._encode_prompt(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
if isinstance(lowercase__ , lowercase__ ):
_A = torch.cat(lowercase__ , dim=0 )
if isinstance(lowercase__ , lowercase__ ):
_A = torch.cat(lowercase__ , dim=0 )
if do_classifier_free_guidance:
_A = image_embeds.repeat_interleave(lowercase__ , dim=0 )
_A = negative_image_embeds.repeat_interleave(lowercase__ , dim=0 )
_A = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=lowercase__ )
self.scheduler.set_timesteps(lowercase__ , device=lowercase__ )
_A = self.scheduler.timesteps
_A = self.unet.config.in_channels
_A = get_new_h_w(lowercase__ , lowercase__ , self.movq_scale_factor )
# create initial latent
_A = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , lowercase__ , lowercase__ , lowercase__ , self.scheduler , )
for i, t in enumerate(self.progress_bar(lowercase__ ) ):
# expand the latents if we are doing classifier free guidance
_A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_A = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
_A = self.unet(
sample=lowercase__ , timestep=lowercase__ , encoder_hidden_states=lowercase__ , added_cond_kwargs=lowercase__ , return_dict=lowercase__ , )[0]
if do_classifier_free_guidance:
_A = noise_pred.split(latents.shape[1] , dim=1 )
_A = noise_pred.chunk(2 )
_A = variance_pred.chunk(2 )
_A = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_A = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_A = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_A = self.scheduler.step(
lowercase__ , lowercase__ , lowercase__ , generator=lowercase__ , ).prev_sample
# post-processing
_A = self.movq.decode(lowercase__ , force_not_quantize=lowercase__ )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
if output_type in ["np", "pil"]:
_A = image * 0.5 + 0.5
_A = image.clamp(0 , 1 )
_A = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_A = self.numpy_to_pil(lowercase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase__ )
| 702 | import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
rename_keys_prefix = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
ACCEPTABLE_CHECKPOINTS = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd


def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but it was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
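A hypothetical command-line invocation of the conversion script above (the script file name and output path are placeholders; the checkpoint name must be one of ACCEPTABLE_CHECKPOINTS):

#   python convert_visual_bert.py vqa_fine_tuned.th ./visualbert-vqa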
| 83 | 0 |
import math


def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[0], array[i] = array[i], array[0]
        heapify(array, 0, i)
    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input('Enter numbers separated by a comma : ').strip()
    unsorted = [float(item) for item in user_input.split(',')]
    print(sort(unsorted))
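A short usage sketch of the hybrid introsort above (illustrative values added for this edit). Note that sort mutates its argument in place and returns it, so the comparison is made against a fresh literal:

example = [4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12]
assert sort(example) == sorted([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12])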
| 703 | from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class a ( __lowerCAmelCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[str]:
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def UpperCAmelCase ( self ) -> Optional[int]:
_A = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]}
return Dataset.from_dict(lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
_A = self._create_example_records()
_A = Dataset.from_list(lowerCAmelCase_ )
self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] )
for i, r in enumerate(lowerCAmelCase_ ):
self.assertDictEqual(lowerCAmelCase_ , example_records[i] )
def UpperCAmelCase ( self ) -> str:
_A = self._create_example_records()
_A = Dataset.from_list(lowerCAmelCase_ )
_A = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def UpperCAmelCase ( self ) -> Any: # checks what happens with missing columns
_A = [{"""col_1""": 1}, {"""col_2""": """x"""}]
_A = Dataset.from_list(lowerCAmelCase_ )
self.assertDictEqual(dset[0] , {"""col_1""": 1} )
self.assertDictEqual(dset[1] , {"""col_1""": None} ) # NB: first record is used for columns
def UpperCAmelCase ( self ) -> Tuple: # checks if the type can be inferred from the second record
_A = [{"""col_1""": []}, {"""col_1""": [1, 2]}]
_A = Dataset.from_list(lowerCAmelCase_ )
self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) )
def UpperCAmelCase ( self ) -> Any:
_A = Dataset.from_list([] )
self.assertEqual(len(lowerCAmelCase_ ) , 0 )
self.assertListEqual(dset.column_names , [] )
| 83 | 0 |
def decimal_isolate(number: float, digit_amount: int) -> float:
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)


if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
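For comparison, the standard library's math.modf splits a float into its fractional and integral parts in one call; like decimal_isolate, it truncates toward zero for negative inputs (a side-by-side sketch, not part of the original sample):

import math

fractional, integral = math.modf(35.345)   # fractional ~= 0.345, integral == 35.0
fractional, integral = math.modf(-14.789)  # fractional ~= -0.789, integral == -14.0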
| 704 | def solution(limit: int = 1_000_000) -> int:
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f"{solution() = }")
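The function computes the totient sum Σφ(n) for 2 <= n <= limit, i.e. the count of reduced proper fractions with denominator at most limit (Project Euler 72). A brute-force cross-check for a tiny limit, sketched under the assumption that the float accumulation is at most off by one before int() truncates:

from math import gcd

def phi_naive(n: int) -> int:
    return sum(1 for k in range(1, n + 1) if gcd(n, k) == 1)

expected = sum(phi_naive(n) for n in range(2, 101))
assert abs(solution(100) - expected) <= 1  # phi is accumulated in floats, so allow rounding slack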
| 83 | 0 |
'''simple docstring'''
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from the first character of the prefix of the node
        self.nodes: dict = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word: str) -> tuple:
        """Compute the common substring of the node prefix and the word,
        plus the remainders of both."""
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the node if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True

    def print_tree(self, height: int = 0) -> None:
        if self.prefix != "":
            print("-" * height, self.prefix, " (leaf)" if self.is_leaf else "")
        for value in self.nodes.values():
            value.print_tree(height + 1)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()


if __name__ == "__main__":
    main()
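A small illustration of the prefix-splitting primitive that drives insert, find, and delete (added for this edit):

node = RadixNode("banana")
# returns (common prefix, remaining node prefix, remaining word)
assert node.match("bandana") == ("ban", "ana", "dana")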
| 705 | import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class a ( __lowerCAmelCase ):
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=7 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=99 , lowerCAmelCase_=32 , lowerCAmelCase_=5 , lowerCAmelCase_=4 , lowerCAmelCase_=37 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=5_12 , lowerCAmelCase_=16 , lowerCAmelCase_=2 , lowerCAmelCase_=0.02 , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_="None" , lowerCAmelCase_=3 , lowerCAmelCase_=4 , lowerCAmelCase_=None , ) -> Union[str, Any]:
_A = parent
_A = batch_size
_A = seq_length
_A = is_training
_A = use_input_mask
_A = use_token_type_ids
_A = use_labels
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = type_vocab_size
_A = type_sequence_label_size
_A = initializer_range
_A = num_labels
_A = num_choices
_A = relative_attention
_A = position_biased_input
_A = pos_att_type
_A = scope
def UpperCAmelCase ( self ) -> Dict:
_A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A = None
if self.use_input_mask:
_A = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_A = None
if self.use_token_type_ids:
_A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_A = None
_A = None
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_A = ids_tensor([self.batch_size] , self.num_choices )
_A = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase ( self ) -> Optional[int]:
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Any:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]:
_A = DebertaVaModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0]
_A = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0]
_A = model(lowerCAmelCase_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
_A = DebertaVaForMaskedLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any:
_A = self.num_labels
_A = DebertaVaForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
_A = self.num_labels
_A = DebertaVaForTokenClassification(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[int]:
_A = DebertaVaForQuestionAnswering(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
_A = DebertaVaForMultipleChoice(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase ( self ) -> Optional[int]:
_A = self.prepare_config_and_inputs()
(
(
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) ,
) = config_and_inputs
_A = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class a ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase :int = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
lowerCamelCase :str = (
{
'''feature-extraction''': DebertaVaModel,
'''fill-mask''': DebertaVaForMaskedLM,
'''question-answering''': DebertaVaForQuestionAnswering,
'''text-classification''': DebertaVaForSequenceClassification,
'''token-classification''': DebertaVaForTokenClassification,
'''zero-shot''': DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase :str = True
lowerCamelCase :Union[str, Any] = False
lowerCamelCase :Optional[int] = False
lowerCamelCase :List[str] = False
lowerCamelCase :str = False
def UpperCAmelCase ( self ) -> Optional[int]:
_A = DebertaVaModelTester(self )
_A = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 )
def UpperCAmelCase ( self ) -> List[str]:
self.config_tester.run_common_tests()
def UpperCAmelCase ( self ) -> List[str]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Any:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> int:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Optional[int]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*lowerCAmelCase_ )
@slow
def UpperCAmelCase ( self ) -> Any:
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = DebertaVaModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class a ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip(reason="""Model not available yet""" )
def UpperCAmelCase ( self ) -> int:
pass
@slow
def UpperCAmelCase ( self ) -> Optional[Any]:
_A = DebertaVaModel.from_pretrained("""microsoft/deberta-v2-xlarge""" )
_A = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
_A = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0]
# compare the actual values for a slice.
_A = torch.tensor(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase_ , atol=1E-4 ) , F'''{output[:, 1:4, 1:4]}''' )
| 83 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'''
),
}
class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 706 | def nor_gate(input_1: int, input_2: int) -> int:
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"| 0 | 0 | {nor_gate(0, 0)} |")
    print(f"| 0 | 1 | {nor_gate(0, 1)} |")
    print(f"| 1 | 0 | {nor_gate(1, 0)} |")
    print(f"| 1 | 1 | {nor_gate(1, 1)} |")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
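NOR is functionally complete, so the other basic gates can be derived from nor_gate alone; a brief sketch added for this edit:

def not_gate(a: int) -> int:
    return nor_gate(a, a)

def or_gate(a: int, b: int) -> int:
    return not_gate(nor_gate(a, b))

def and_gate(a: int, b: int) -> int:
    return nor_gate(not_gate(a), not_gate(b))

assert [and_gate(a, b) for a in (0, 1) for b in (0, 1)] == [0, 0, 0, 1]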
| 83 | 0 |
'''simple docstring'''
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal
class FlaxTimestepEmbedding(nn.Module):
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
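A shape sanity check for the embedding helper, sketched against the reconstruction above: sine and cosine halves are concatenated, so each timestep yields one embedding_dim-sized vector.

import jax.numpy as jnp

timesteps = jnp.arange(4)
emb = get_sinusoidal_embeddings(timesteps, embedding_dim=32)
assert emb.shape == (4, 32)  # one 32-dim embedding per timestep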
| 707 | import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class a :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=sys.maxsize ) -> str:
_A = """bilinear"""
_A = max_size
_A = short_edge_length
def __call__( self , lowerCAmelCase_ ) -> Optional[Any]:
_A = []
for img in imgs:
_A , _A = img.shape[:2]
# later: provide list and randomly choose index for resize
_A = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
_A = size * 1.0 / min(lowerCAmelCase_ , lowerCAmelCase_ )
if h < w:
_A , _A = size, scale * w
else:
_A , _A = scale * h, size
if max(lowerCAmelCase_ , lowerCAmelCase_ ) > self.max_size:
_A = self.max_size * 1.0 / max(lowerCAmelCase_ , lowerCAmelCase_ )
_A = newh * scale
_A = neww * scale
_A = int(neww + 0.5 )
_A = int(newh + 0.5 )
                if img.dtype == np.uint8:
_A = Image.fromarray(lowerCAmelCase_ )
_A = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
_A = np.asarray(lowerCAmelCase_ )
else:
_A = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw
_A = nn.functional.interpolate(
lowerCAmelCase_ , (newh, neww) , mode=self.interp_method , align_corners=lowerCAmelCase_ ).squeeze(0 )
img_augs.append(lowerCAmelCase_ )
return img_augs
class a :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ ) -> List[Any]:
_A = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
_A = cfg.INPUT.FORMAT
_A = cfg.SIZE_DIVISIBILITY
_A = cfg.PAD_VALUE
_A = cfg.INPUT.MAX_SIZE_TEST
_A = cfg.MODEL.DEVICE
_A = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
_A = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
_A = lambda lowerCAmelCase_ : (x - self.pixel_mean) / self.pixel_std
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Tuple:
_A = tuple(max(lowerCAmelCase_ ) for s in zip(*[img.shape for img in images] ) )
_A = [im.shape[-2:] for im in images]
_A = [
nn.functional.pad(
lowerCAmelCase_ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(lowerCAmelCase_ , lowerCAmelCase_ )
]
return torch.stack(lowerCAmelCase_ ), torch.tensor(lowerCAmelCase_ )
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_=False ) -> int:
with torch.no_grad():
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_A = [images]
if single_image:
assert len(lowerCAmelCase_ ) == 1
for i in range(len(lowerCAmelCase_ ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(lowerCAmelCase_ , images.pop(lowerCAmelCase_ ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
lowerCAmelCase_ , torch.as_tensor(img_tensorize(images.pop(lowerCAmelCase_ ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
_A = torch.tensor([im.shape[:2] for im in images] )
_A = self.aug(lowerCAmelCase_ )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
_A = [self.normalizer(lowerCAmelCase_ ) for x in images]
# now pad them to do the following operations
_A , _A = self.pad(lowerCAmelCase_ )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
_A = torch.true_divide(lowerCAmelCase_ , lowerCAmelCase_ )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size: Tuple[int, int]):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
| 83 | 0 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class a ( __lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase :List[str] = PerceiverTokenizer
lowerCamelCase :List[Any] = False
def UpperCAmelCase ( self ) -> Any:
super().setUp()
_A = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCAmelCase ( self ) -> int:
return PerceiverTokenizer.from_pretrained("""deepmind/language-perceiver""" )
def UpperCAmelCase ( self , **lowerCAmelCase_ ) -> Optional[Any]:
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=False , lowerCAmelCase_=20 , lowerCAmelCase_=5 ) -> Union[str, Any]:
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for Perceiver because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
_A = []
for i in range(len(lowerCAmelCase_ ) ):
try:
_A = tokenizer.decode([i] , clean_up_tokenization_spaces=lowerCAmelCase_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
_A = list(filter(lambda lowerCAmelCase_ : re.match(r"""^[ a-zA-Z]+$""" , t[1] ) , lowerCAmelCase_ ) )
_A = list(filter(lambda lowerCAmelCase_ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=lowerCAmelCase_ ) , lowerCAmelCase_ ) )
if max_length is not None and len(lowerCAmelCase_ ) > max_length:
_A = toks[:max_length]
if min_length is not None and len(lowerCAmelCase_ ) < min_length and len(lowerCAmelCase_ ) > 0:
while len(lowerCAmelCase_ ) < min_length:
_A = toks + toks
# toks_str = [t[1] for t in toks]
_A = [t[0] for t in toks]
# Ensure consistency
_A = tokenizer.decode(lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ )
if " " not in output_txt and len(lowerCAmelCase_ ) > 1:
_A = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowerCAmelCase_ )
+ """ """
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowerCAmelCase_ )
)
if with_prefix_space:
_A = """ """ + output_txt
_A = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
return output_txt, output_ids
def UpperCAmelCase ( self ) -> str:
_A = self.perceiver_tokenizer
_A = """Unicode €."""
_A = tokenizer(lowerCAmelCase_ )
_A = [4, 91, 1_16, 1_11, 1_05, 1_17, 1_06, 1_07, 38, 2_32, 1_36, 1_78, 52, 5]
self.assertEqual(encoded["""input_ids"""] , lowerCAmelCase_ )
# decoding
_A = tokenizer.decode(lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , """[CLS]Unicode €.[SEP]""" )
_A = tokenizer("""e è é ê ë""" )
_A = [4, 1_07, 38, 2_01, 1_74, 38, 2_01, 1_75, 38, 2_01, 1_76, 38, 2_01, 1_77, 5]
self.assertEqual(encoded["""input_ids"""] , lowerCAmelCase_ )
# decoding
_A = tokenizer.decode(lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , """[CLS]e è é ê ë[SEP]""" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ) , """[CLS]e è é ê ë[SEP]""" )
def UpperCAmelCase ( self ) -> Optional[int]:
_A = self.perceiver_tokenizer
_A = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
# fmt: off
_A = [4, 71, 38, 1_14, 1_17, 1_16, 1_09, 38, 1_18, 1_03, 1_20, 1_03, 1_09, 1_20, 1_03, 1_18, 1_10, 38, 1_08, 1_17, 1_20, 38, 1_21, 1_23, 1_15, 1_15, 1_03, 1_20, 1_11, 1_28, 1_03, 1_22, 1_11, 1_17, 1_16, 52, 5, 0]
# fmt: on
_A = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
if FRAMEWORK != "jax":
_A = list(batch.input_ids.numpy()[0] )
else:
_A = list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def UpperCAmelCase ( self ) -> Union[str, Any]:
_A = self.perceiver_tokenizer
_A = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
_A = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("""input_ids""" , lowerCAmelCase_ )
self.assertIn("""attention_mask""" , lowerCAmelCase_ )
self.assertNotIn("""decoder_input_ids""" , lowerCAmelCase_ )
self.assertNotIn("""decoder_attention_mask""" , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Optional[Any]:
_A = self.perceiver_tokenizer
_A = [
"""Summary of the text.""",
"""Another summary.""",
]
_A = tokenizer(
text_target=lowerCAmelCase_ , max_length=32 , padding="""max_length""" , truncation=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
def UpperCAmelCase ( self ) -> int:
# safety check on max_len default value so we are sure the test works
_A = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
_A = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_A = tempfile.mkdtemp()
_A = """ He is very happy, UNwant\u00E9d,running"""
_A = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
tokenizer.save_pretrained(lowerCAmelCase_ )
_A = tokenizer.__class__.from_pretrained(lowerCAmelCase_ )
_A = after_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
shutil.rmtree(lowerCAmelCase_ )
_A = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_A = tempfile.mkdtemp()
_A = """ He is very happy, UNwant\u00E9d,running"""
tokenizer.add_tokens(["""bim""", """bambam"""] )
_A = tokenizer.additional_special_tokens
additional_special_tokens.append("""new_additional_special_token""" )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
_A = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
tokenizer.save_pretrained(lowerCAmelCase_ )
_A = tokenizer.__class__.from_pretrained(lowerCAmelCase_ )
_A = after_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertIn("""new_additional_special_token""" , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
_A = tokenizer.__class__.from_pretrained(lowerCAmelCase_ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Union[str, Any]:
_A = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCAmelCase_ )
with open(os.path.join(lowerCAmelCase_ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
_A = json.load(lowerCAmelCase_ )
with open(os.path.join(lowerCAmelCase_ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
_A = json.load(lowerCAmelCase_ )
_A = [F'''<extra_id_{i}>''' for i in range(1_25 )]
_A = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
_A = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
with open(os.path.join(lowerCAmelCase_ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(lowerCAmelCase_ , lowerCAmelCase_ )
with open(os.path.join(lowerCAmelCase_ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(lowerCAmelCase_ , lowerCAmelCase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_A = tokenizer_class.from_pretrained(
lowerCAmelCase_ , )
self.assertIn(
"""an_additional_special_token""" , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
["""an_additional_special_token"""] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_A = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""" , lstrip=lowerCAmelCase_ )]
_A = tokenizer_class.from_pretrained(
lowerCAmelCase_ , additional_special_tokens=lowerCAmelCase_ , )
self.assertIn("""a_new_additional_special_token""" , tokenizer.additional_special_tokens )
self.assertEqual(
["""a_new_additional_special_token"""] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ) , )
def UpperCAmelCase ( self ) -> int:
_A = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([1_78] ) , """�""" )
def UpperCAmelCase ( self ) -> int:
pass
def UpperCAmelCase ( self ) -> Union[str, Any]:
pass
def UpperCAmelCase ( self ) -> Dict:
pass
def UpperCAmelCase ( self ) -> Any:
pass
def UpperCAmelCase ( self ) -> str:
# The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
# strings and special added tokens as tokens
_A = self.get_tokenizers(fast=lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_A = ["""[CLS]""", """t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """s""", """t""", """[SEP]"""]
_A = tokenizer.convert_tokens_to_string(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
| 708 | from collections import defaultdict
def dfs(start: int) -> int:
    """Count the size of the subtree rooted at start; record a cut whenever
    a subtree of even size can be split off."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree() -> None:
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited = {}
    cuts = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
| 83 | 0 |
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
| 709 | import heapq
def greedy_min_vertex_cover(graph: dict) -> set:
    queue = []
    # for each node and its adjacency list, add them and the rank of the node to the queue
    # using the heapq module the queue will be filled like a priority queue
    # heapq works with a min priority queue, so -1 * len(v) is used to build a max queue
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while the queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract the vertex with max rank from the queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if the vertex has no adjacent nodes left, skip it
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
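One more sanity check of the greedy cover, sketched for this edit on a star graph, where the centre vertex alone covers every edge:

star_graph = {0: [1, 2, 3], 1: [0], 2: [0], 3: [0]}
assert greedy_min_vertex_cover(star_graph) == {0}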
| 83 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput( ModelOutput ):
    """simple docstring"""
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
class RobertaSeriesConfig( XLMRobertaConfig ):
    """simple docstring"""
    def __init__( self , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , project_dim=5_12 , pooler_fn="cls" , learn_encoder=False , use_attention_mask=True , **kwargs , ) -> Optional[Any]:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class RobertaSeriesModelWithTransformation( RobertaPreTrainedModel ):
    """simple docstring"""
    _keys_to_ignore_on_load_unexpected = [r'''pooler''', r'''logit_scale''']
    _keys_to_ignore_on_load_missing = [r'''position_ids''', r'''predictions.decoder.bias''']
    base_model_prefix = '''roberta'''
    config_class = RobertaSeriesConfig
    def __init__( self , config ) -> List[str]:
        super().__init__(config )
        self.roberta = XLMRobertaModel(config )
        self.transformation = nn.Linear(config.hidden_size , config.project_dim )
        self.has_pre_transformation = getattr(config , """has_pre_transformation""" , False )
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size , config.project_dim )
            self.pre_LN = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
        self.post_init()
    def forward( self , input_ids = None , attention_mask = None , token_type_ids = None , position_ids = None , head_mask = None , inputs_embeds = None , encoder_hidden_states = None , encoder_attention_mask = None , output_attentions = None , output_hidden_states = None , return_dict = None , ) -> str:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids , attention_mask=attention_mask , token_type_ids=token_type_ids , position_ids=position_ids , head_mask=head_mask , inputs_embeds=inputs_embeds , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_attentions=output_attentions , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=return_dict , )
        if self.has_pre_transformation:
            sequence_output2 = outputs["""hidden_states"""][-2]
            sequence_output2 = self.pre_LN(sequence_output2 )
            projection_state2 = self.transformation_pre(sequence_output2 )
            return TransformationModelOutput(
                projection_state=projection_state2 , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
        else:
            projection_state = self.transformation(outputs.last_hidden_state )
            return TransformationModelOutput(
                projection_state=projection_state , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 710 | import math
import unittest
def is_prime(number: int) -> bool:
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1) , 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[Any]:
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def UpperCAmelCase ( self ) -> Dict:
with self.assertRaises(lowerCAmelCase_ ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , """Zero doesn't have any positive factors, primes must have exactly two.""" , )
self.assertFalse(
is_prime(1 ) , """One only has 1 positive factor, primes must have exactly two.""" , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
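    # Added cross-check (sketch): compare the 6k±1 trial division above with
    # a plain Sieve of Eratosthenes over a small range.
    def test_against_sieve(self) -> None:
        limit = 1_000
        flags = [True] * (limit + 1)
        flags[0] = flags[1] = False
        for p in range(2, int(limit**0.5) + 1):
            if flags[p]:
                for multiple in range(p * p, limit + 1, p):
                    flags[multiple] = False
        for n in range(2, limit + 1):
            self.assertEqual(is_prime(n), flags[n])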
if __name__ == "__main__":
unittest.main()
| 83 | 0 |
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class a ( PreTrainedTokenizer ):
    """simple docstring"""
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__( self , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , extra_ids=1_25 , additional_special_tokens=None , **kwargs , ) -> Any:
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [F'''<extra_id_{i}>''' for i in range(extra_ids )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x : bool("""extra_id""" in str(x ) ) , additional_special_tokens ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
                    """ provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"""
                    """ extra_ids tokens""" )
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        super().__init__(
            eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , extra_ids=extra_ids , additional_special_tokens=additional_special_tokens , **kwargs , )
        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # utf is 8 bits
        # define special tokens dict
        self.special_tokens_encoder = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder )
        n = len(additional_special_tokens )
        for i, token in enumerate(additional_special_tokens ):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder = {v: k for k, v in self.special_tokens_encoder.items()}
    @property
    def vocab_size( self ) -> int:
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> Tuple:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0 )) + [1]
        return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
    def _add_eos_if_not_present( self , token_ids ) -> List[Any]:
        if len(token_ids ) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                F'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
                """ eos tokens being added.""" )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> int:
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos ) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos ) * [0]
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> str:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0 )
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1 )
            return token_ids_0 + token_ids_1
    def _tokenize( self , text ) -> Dict:
        tokens = [chr(i ) for i in text.encode("""utf-8""" )]
        return tokens
    def _convert_token_to_id( self , token ) -> Optional[Any]:
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token ) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token ) + self._num_special_tokens
        return token_id
    def _convert_id_to_token( self , index ) -> str:
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens )
        return token
    def convert_tokens_to_string( self , tokens ) -> Union[str, Any]:
        bstring = b''
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("""utf-8""" )
            elif token in self.added_tokens_decoder:
                tok_string = self.added_tokens_decoder[token].encode("""utf-8""" )
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("""utf-8""" )
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("""utf-8""" )
            else:
                tok_string = bytes([ord(token )] )
            bstring += tok_string
        string = bstring.decode("""utf-8""" , errors="""ignore""" )
        return string
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> str:
        return ()
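# Added round-trip sketch of the byte-level scheme above, outside the class:
# characters become UTF-8 bytes, each shifted by the 3 special ids
# (pad/eos/unk). A minimal stand-in, not the tokenizer's public API.
NUM_SPECIAL = 3

def byte_encode(text: str) -> list:
    return [b + NUM_SPECIAL for b in text.encode("utf-8")]

def byte_decode(ids: list) -> str:
    return bytes(i - NUM_SPECIAL for i in ids).decode("utf-8", errors="ignore")

assert byte_decode(byte_encode("héllo")) == "héllo"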
| 711 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_encoder_decoder': ['EncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_encoder_decoder'] = ['EncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_encoder_decoder'] = ['TFEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_encoder_decoder'] = ['FlaxEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 83 | 0 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250_004
RO_CODE = 250_020
@require_sentencepiece
@require_tokenizers
class a ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = MBartaaTokenizer
    rust_tokenizer_class = MBartaaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ) -> None:
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = MBartaaTokenizer(SAMPLE_VOCAB , src_lang="""en_XX""" , tgt_lang="""ro_RO""" , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase ( self ) -> Tuple:
        token = """<s>"""
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def UpperCAmelCase ( self ) -> List[Any]:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<s>""" )
        self.assertEqual(vocab_keys[1] , """<pad>""" )
        self.assertEqual(vocab_keys[-1] , """<mask>""" )
        self.assertEqual(len(vocab_keys ) , 10_54 )
def UpperCAmelCase ( self ) -> Optional[int]:
self.assertEqual(self.get_tokenizer().vocab_size , 10_54 )
def UpperCAmelCase ( self ) -> Tuple:
        tokenizer = MBartaaTokenizer(SAMPLE_VOCAB , src_lang="""en_XX""" , tgt_lang="""ro_RO""" , keep_accents=True )
        tokens = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            tokens , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """."""] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """."""] , )
@slow
def UpperCAmelCase ( self ) -> Any:
_A = {"""input_ids""": [[25_00_04, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [25_00_04, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_00_04, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
            expected_encoding=_A , model_name="""facebook/mbart-large-50""" , revision="""d3913889c59cd5c9e456b269c376325eabad57e2""" , )
    def test_save_pretrained( self ) -> None:
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        self.tokenizers_list[0] = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart50""", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=True )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=False )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
@require_torch
@require_sentencepiece
@require_tokenizers
class a ( unittest.TestCase ):
"""simple docstring"""
    checkpoint_name = '''facebook/mbart-large-50-one-to-many-mmt'''
    src_text = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
    tgt_text = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
    expected_src_tokens = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]
    @classmethod
    def setUpClass( cls ) -> None:
        cls.tokenizer = MBartaaTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
        cls.pad_token_id = 1
        return cls
def UpperCAmelCase ( self ) -> int:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 25_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 25_00_04 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 25_00_20 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""mr_IN"""] , 25_00_38 )
def UpperCAmelCase ( self ) -> int:
        ids = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , ids )
def UpperCAmelCase ( self ) -> List[str]:
        self.assertIn(RO_CODE , self.tokenizer.all_special_ids )
        generated_ids = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2]
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
        expected_romanian = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
        self.assertEqual(result , expected_romanian )
        self.assertNotIn(self.tokenizer.eos_token , result )
def UpperCAmelCase ( self ) -> int:
        src_text = ["""this is gunna be a long sentence """ * 20]
        assert isinstance(src_text[0] , str )
        desired_max_length = 10
        ids = self.tokenizer(src_text , max_length=desired_max_length , truncation=True ).input_ids[0]
        self.assertEqual(ids[0] , EN_CODE )
        self.assertEqual(ids[-1] , 2 )
        self.assertEqual(len(ids ) , desired_max_length )
def UpperCAmelCase ( self ) -> str:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [25_00_53, 25_00_01] )
def UpperCAmelCase ( self ) -> List[Any]:
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname )
        new_tok = MBartaaTokenizer.from_pretrained(tmpdirname )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , original_special_tokens )
@require_torch
def UpperCAmelCase ( self ) -> str:
        batch = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=True , return_tensors="""pt""" )
        batch["""decoder_input_ids"""] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def UpperCAmelCase ( self ) -> Union[str, Any]:
        batch = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=True , truncation=True , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
        batch["""decoder_input_ids"""] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
        self.assertIsInstance(batch , BatchEncoding )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , result )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def UpperCAmelCase ( self ) -> str:
        batch = self.tokenizer(self.src_text , padding=True , truncation=True , max_length=3 , return_tensors="""pt""" )
        targets = self.tokenizer(
            text_target=self.tgt_text , padding=True , truncation=True , max_length=10 , return_tensors="""pt""" )
        labels = targets["""input_ids"""]
        batch["""decoder_input_ids"""] = shift_tokens_right(labels , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def UpperCAmelCase ( self ) -> str:
        inputs = self.tokenizer._build_translation_inputs(
            """A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
        self.assertEqual(
            nested_simplify(inputs ) , {
# en_XX, A, test, EOS
"""input_ids""": [[25_00_04, 62, 30_34, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 25_00_01,
} , )
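# Added list-level sketch of what the decoder-input assertions above expect
# from shift_tokens_right (ignoring padding): each label row is rotated
# right by one, so the trailing eos (2) becomes the first decoder token.
def _rotate_right(row: list) -> list:
    return [row[-1]] + row[:-1]

assert _rotate_right([RO_CODE, 884, 9_019, 2] )[:2] == [2, RO_CODE]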
| 712 | from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class a ( Pipeline ):
"""simple docstring"""
    def __init__( self , *args , **kwargs ) -> None:
        super().__init__(*args , **kwargs )
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING )
    def _sanitize_parameters( self , top_k=None , padding=None , truncation=None , **kwargs ) -> Tuple:
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["""padding"""] = padding
        if truncation is not None:
            preprocess_params["""truncation"""] = truncation
        if top_k is not None:
            postprocess_params["""top_k"""] = top_k
        return preprocess_params, {}, postprocess_params
    def __call__( self , image , question = None , **kwargs ) -> Union[str, Any]:
        if isinstance(image , (Image.Image, str) ) and isinstance(question , str ):
            inputs = {"""image""": image, """question""": question}
        else:
            inputs = image
        results = super().__call__(inputs , **kwargs )
        return results
    def preprocess( self , inputs , padding=False , truncation=False ) -> Any:
        image = load_image(inputs["""image"""] )
        model_inputs = self.tokenizer(
            inputs["""question"""] , return_tensors=self.framework , padding=padding , truncation=truncation )
        image_features = self.image_processor(images=image , return_tensors=self.framework )
        model_inputs.update(image_features )
        return model_inputs
    def _forward( self , model_inputs ) -> Tuple:
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs , top_k=5 ) -> Union[str, Any]:
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k )
        else:
            raise ValueError(F'''Unsupported framework: {self.framework}''' )
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
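# Hypothetical usage sketch (the model name below is illustrative only, not
# mandated by this file):
#     from transformers import pipeline
#     vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
#     print(vqa(image="photo.jpg", question="What color is the car?", top_k=3))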
| 83 | 0 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'huggingface/informer-tourism-monthly': (
'https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class a ( PretrainedConfig ):
"""simple docstring"""
    model_type = '''informer'''
    attribute_map = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
        '''num_hidden_layers''': '''encoder_layers''',
    }
    def __init__( self , prediction_length = None , context_length = None , distribution_output = "student_t" , loss = "nll" , input_size = 1 , lags_sequence = None , scaling = "mean" , num_dynamic_real_features = 0 , num_static_real_features = 0 , num_static_categorical_features = 0 , num_time_features = 0 , cardinality = None , embedding_dimension = None , d_model = 64 , encoder_ffn_dim = 32 , decoder_ffn_dim = 32 , encoder_attention_heads = 2 , decoder_attention_heads = 2 , encoder_layers = 2 , decoder_layers = 2 , is_encoder_decoder = True , activation_function = "gelu" , dropout = 0.05 , encoder_layerdrop = 0.1 , decoder_layerdrop = 0.1 , attention_dropout = 0.1 , activation_dropout = 0.1 , num_parallel_samples = 1_00 , init_std = 0.02 , use_cache=True , attention_type = "prob" , sampling_factor = 5 , distil = True , **kwargs , ) -> None:
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality ) != num_static_categorical_features:
                raise ValueError(
                    """The cardinality should be a list of the same length as `num_static_categorical_features`""" )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension ) != num_static_categorical_features:
                raise ValueError(
                    """The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence ) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
@property
    def _number_of_features( self ) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
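# Added usage sketch (the dump keeps the class name `a`; upstream this is
# InformerConfig). The derived feature size follows from the arguments:
# feature_size = input_size * len(lags_sequence) + _number_of_features.
if __name__ == "__main__":
    demo_config = a(prediction_length=24 , num_time_features=2 )
    print(demo_config.feature_size , demo_config.d_model )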
| 713 | import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds( train_file: str , eval_file: str , test_file: str , tokenizer: PreTrainedTokenizer , label_column_id: int , max_seq_length: Optional[int] = None , ) -> Optional[int]:
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]
    ds = datasets.load_dataset("""csv""" , data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}
    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]] , truncation=True , max_length=max_seq_length , padding="""max_length""") , batched=True , )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]) , truncation=True , max_length=max_seq_length , padding="""max_length""" , ) , batched=True , )
    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])) , )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))
    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])) , )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))
    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])) , )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))
    return train_ds, val_ds, test_ds, label2id
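# Added standalone illustration of the from_generator pattern used above:
# a dict-of-tensors feature structure paired with an int64 label (sketch).
def _demo_from_generator():
    def gen():
        for i in range(3):
            yield ({"input_ids": [i, i + 1]}, i % 2)
    return tf.data.Dataset.from_generator(
        gen,
        ({"input_ids": tf.int32}, tf.int64),
        ({"input_ids": tf.TensorShape([None])}, tf.TensorShape([])),
    )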
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """simple docstring"""
    label_column_id: int = field(metadata={'''help''': '''Which column contains the label'''} )
    train_file: Optional[str] = field(default=None , metadata={'''help''': '''The path of the training file'''} )
    dev_file: Optional[str] = field(default=None , metadata={'''help''': '''The path of the development file'''} )
    test_file: Optional[str] = field(default=None , metadata={'''help''': '''The path of the test file'''} )
    max_seq_length: int = field(
        default=128 , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class ModelArguments:
    """simple docstring"""
    model_name_or_path: str = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    use_fast: bool = field(default=False , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
            """ --overwrite_output_dir to overcome.""")
    # Setup logging
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
    logger.info(
        F'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, '''
        F'''16-bits training: {training_args.fp16}''')
    logger.info(F'''Training/evaluation parameters {training_args}''')
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=tokenizer , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(label2id) , label2id=label2id , id2label={id: label for label, id in label2id.items()} , finetuning_task="""text-classification""" , cache_dir=model_args.cache_dir , )
    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path , from_pt=bool(""".bin""" in model_args.model_name_or_path) , config=config , cache_dir=model_args.cache_dir , )
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions , axis=1)
        return {"acc": (preds == p.label_ids).mean()}
    # Initialize our Trainer
    trainer = TFTrainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , )
    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , """eval_results.txt""")
        with open(output_eval_file , """w""") as writer:
            logger.info("""***** Eval results *****""")
            for key, value in result.items():
                logger.info(F''' {key} = {value}''')
                writer.write(F'''{key} = {value}\n''')
            results.update(result)
    return results
if __name__ == "__main__":
main()
| 83 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 714 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class a ( PretrainedConfig ):
"""simple docstring"""
    model_type = '''speech_to_text'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
    def __init__( self , vocab_size=1_00_00 , encoder_layers=12 , encoder_ffn_dim=20_48 , encoder_attention_heads=4 , decoder_layers=6 , decoder_ffn_dim=20_48 , decoder_attention_heads=4 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="relu" , d_model=2_56 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=2 , scale_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , max_source_positions=60_00 , max_target_positions=10_24 , num_conv_layers=2 , conv_kernel_sizes=(5, 5) , conv_channels=10_24 , input_feat_per_channel=80 , input_channels=1 , **kwargs , ) -> None:
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes )
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        if len(self.conv_kernel_sizes ) != self.num_conv_layers:
            raise ValueError(
                """Configuration for convolutional module is incorrect. """
                """It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` """
                F'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, '''
                F'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , **kwargs , )
| 83 | 0 |
def sylvester(number: int) -> int:
    assert isinstance(number , int ), F'''The input value of [n={number}] is not an integer'''
    if number == 1:
        return 2
    elif number < 1:
        msg = F'''The input value of [n={number}] has to be > 0'''
        raise ValueError(msg )
    else:
        num = sylvester(number - 1 )
        lower = num - 1
        upper = num
        return lower * upper + 1


if __name__ == "__main__":
    print(F'''The 8th number in Sylvester\'s sequence: {sylvester(8)}''')
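    # Added illustration: the recurrence s(n) = s(n-1)^2 - s(n-1) + 1 grows
    # doubly exponentially; the first terms are small enough to print.
    for n in range(1, 6):
        print(n, sylvester(n))  # 2, 3, 7, 43, 1807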
| 715 | from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area( fnc: Callable[[int | float], int | float] , x_start: int | float , x_end: int | float , steps: int = 100 , ) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x: float) -> float:
        return x**3 + x**2
print('f(x) = x^3 + x^2')
print('The area between the curve, x = -5, x = 5 and the x axis is:')
    i = 10
while i <= 100_000:
print(F'''with {i} steps: {trapezoidal_area(f, -5, 5, i)}''')
i *= 10
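    # Added accuracy check (sketch): the exact integral of x^2 on [0, 1] is
    # 1/3, so the estimate should approach 0.333... as steps grow.
    for steps in (10, 100, 1_000):
        print(steps, trapezoidal_area(lambda x: x * x, 0, 1, steps))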
| 83 | 0 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class a ( Dataset ):
"""simple docstring"""
    def __init__( self , params , data ) -> None:
        self.params = params
        self.token_ids = np.array(data )
        self.lengths = np.array([len(t ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self , lowerCAmelCase_ ) -> List[Any]:
return (self.token_ids[index], self.lengths[index])
def __len__( self ) -> str:
return len(self.lengths )
    def check( self ) -> None:
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
    def remove_long_sequences( self ) -> None:
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(F'''Splitting {sum(indices )} too long sequences.''' )
        def divide_chunks( l , n ):
            return [l[i : i + n] for i in range(0 , len(l ) , n )]
        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""]
        else:
            cls_id, sep_id = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""]
        for seq_, len_ in zip(self.token_ids , self.lengths ):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_ )
                new_lengths.append(len_ )
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_ , max_len - 2 ):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s , 0 , cls_id )
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s , len(sub_s ) , sep_id )
                    assert len(sub_s ) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s )
                new_tok_ids.extend(sub_seqs )
                new_lengths.extend([len(l ) for l in sub_seqs] )
        self.token_ids = np.array(new_tok_ids )
        self.lengths = np.array(new_lengths )
    def remove_empty_sequences( self ) -> None:
        init_size = len(self )
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self )
        logger.info(F'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''' )
    def remove_unknown_sequences( self ) -> None:
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["""unk_token"""]
        init_size = len(self )
        unk_occs = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self )
        logger.info(F'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''' )
    def print_statistics( self ) -> None:
if not self.params.is_master:
return
logger.info(F'''{len(self )} sequences''' )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
    def batch_sequences( self , batch ) -> Union[str, Any]:
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids ) == len(lengths )
        # Max for paddings
        max_seq_len_ = max(lengths )
        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["""pad_token"""]
        else:
            pad_idx = self.params.special_tok_ids["""unk_token"""]
        tk_ = [list(t.astype(int ) ) + [pad_idx] * (max_seq_len_ - len(t )) for t in token_ids]
        assert len(tk_ ) == len(token_ids )
        assert all(len(t ) == max_seq_len_ for t in tk_ )
        tk_t = torch.tensor(tk_ )  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths )  # (bs)
        return tk_t, lg_t
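# Added sketch: the collate logic above in miniature -- right-pad each row
# to the batch maximum with a pad index (illustrative helper, not the API).
def _demo_right_pad(rows=([5, 7, 9], [5, 9], [5]), pad_idx=0):
    max_len = max(len(r) for r in rows)
    return [list(r) + [pad_idx] * (max_len - len(r)) for r in rows]
# _demo_right_pad() -> [[5, 7, 9], [5, 9, 0], [5, 0, 0]]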
| 716 | import numpy as np
import qiskit
def bb84(key_len: int = 8, seed: int | None = None) -> str:
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2 , size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2 , size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2 , size=num_qubits)
    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits , name="""BB84""")
    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_state):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()
    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()
    bb84_circ.measure_all()
    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("""aer_simulator""")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ , sim , shots=1 , seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()
    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = """""".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                alice_basis , bob_basis , result)
            if alice_basis_bit == bob_basis_bit
        ])
    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len , """0""")
    return key
if __name__ == "__main__":
    print(F'''The generated key is : {bb84(8, seed=0)}''')
from doctest import testmod
testmod()
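    # Added sketch: the classical sifting step in isolation (no qiskit) --
    # keep only the positions where the two random bases happen to agree.
    demo_rng = np.random.default_rng(seed=0)
    a_basis, b_basis, bits = (demo_rng.integers(2, size=16) for _ in range(3))
    sifted = "".join(str(bit) for x, y, bit in zip(a_basis, b_basis, bits) if x == y)
    print(F'''sifting kept {len(sifted)}/16 bits: {sifted}''')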
| 83 | 0 |
'''simple docstring'''
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Optional[Any]:
_A = ['''a''', '''b''', '''c''']
# Defaults to last layer if both are None
_A = get_aligned_output_features_output_indices(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , ["""c"""] )
self.assertEqual(UpperCAmelCase__ , [2] )
# Out indices set to match out features
_A = get_aligned_output_features_output_indices(["""a""", """c"""] , UpperCAmelCase__ , UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , ["""a""", """c"""] )
self.assertEqual(UpperCAmelCase__ , [0, 2] )
# Out features set to match out indices
_A = get_aligned_output_features_output_indices(UpperCAmelCase__ , [0, 2] , UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , ["""a""", """c"""] )
self.assertEqual(UpperCAmelCase__ , [0, 2] )
# Out features selected from negative indices
_A = get_aligned_output_features_output_indices(UpperCAmelCase__ , [-3, -1] , UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , ["""a""", """c"""] )
self.assertEqual(UpperCAmelCase__ , [-3, -1] )
def UpperCAmelCase ( self ) -> List[str]:
# Stage names must be set
with self.assertRaises(UpperCAmelCase__ ):
verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , UpperCAmelCase__ )
# Out features must be a list
with self.assertRaises(UpperCAmelCase__ ):
verify_out_features_out_indices(("""a""", """b""") , (0, 1) , ["""a""", """b"""] )
# Out features must be a subset of stage names
with self.assertRaises(UpperCAmelCase__ ):
verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , ["""a"""] )
# Out indices must be a list or tuple
with self.assertRaises(UpperCAmelCase__ ):
verify_out_features_out_indices(UpperCAmelCase__ , 0 , ["""a""", """b"""] )
# Out indices must be a subset of stage names
with self.assertRaises(UpperCAmelCase__ ):
verify_out_features_out_indices(UpperCAmelCase__ , (0, 1) , ["""a"""] )
# Out features and out indices must be the same length
with self.assertRaises(UpperCAmelCase__ ):
verify_out_features_out_indices(["""a""", """b"""] , (0,) , ["""a""", """b""", """c"""] )
# Out features should match out indices
with self.assertRaises(UpperCAmelCase__ ):
verify_out_features_out_indices(["""a""", """b"""] , (0, 2) , ["""a""", """b""", """c"""] )
# Out features and out indices should be in order
with self.assertRaises(UpperCAmelCase__ ):
verify_out_features_out_indices(["""b""", """a"""] , (0, 1) , ["""a""", """b"""] )
# Check passes with valid inputs
verify_out_features_out_indices(["""a""", """b""", """d"""] , (0, 1, -1) , ["""a""", """b""", """c""", """d"""] )
def UpperCAmelCase ( self ) -> int:
_A = BackboneMixin()
_A = ['''a''', '''b''', '''c''']
_A = ['''a''', '''c''']
_A = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ["""a""", """c"""] )
self.assertEqual(backbone.out_indices , [0, 2] )
# Check out features and indices are updated correctly
_A = ['''a''', '''b''']
self.assertEqual(backbone.out_features , ["""a""", """b"""] )
self.assertEqual(backbone.out_indices , [0, 1] )
_A = [-3, -1]
self.assertEqual(backbone.out_features , ["""a""", """c"""] )
self.assertEqual(backbone.out_indices , [-3, -1] )
| 717 | import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_ ) -> Optional[int]:
    return EnvironmentCommand()
def download_command_factory(args ) -> List[str]:
    return EnvironmentCommand(args.accelerate_config_file )
class a ( BaseTransformersCLICommand ):
"""simple docstring"""
@staticmethod
    def register_subcommand( parser ) -> None:
        download_parser = parser.add_parser("""env""" )
        download_parser.set_defaults(func=info_command_factory )
        download_parser.add_argument(
            """--accelerate-config_file""" , default=None , help="""The accelerate config file to use for the default values in the launching script.""" , )
        download_parser.set_defaults(func=download_command_factory )
    def __init__( self , accelerate_config_file , *args ) -> None:
        self._accelerate_config_file = accelerate_config_file
    def run( self ) -> Dict:
        safetensors_version = """not installed"""
        if is_safetensors_available():
            import safetensors
            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("""safetensors""" ) is not None:
            import safetensors
            safetensors_version = F'''{safetensors.__version__} but is ignored because of PyTorch version too old.'''
        accelerate_version = """not installed"""
        accelerate_config = accelerate_config_str = """not found"""
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file
            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file ):
                accelerate_config = load_config_from_file(self._accelerate_config_file ).to_dict()
            accelerate_config_str = (
                """\n""".join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
                if isinstance(accelerate_config , dict )
                else F'''\t{accelerate_config}'''
            )
        pt_version = """not installed"""
        pt_cuda_available = """NA"""
        if is_torch_available():
            import torch
            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        tf_version = """not installed"""
        tf_cuda_available = """NA"""
        if is_tf_available():
            import tensorflow as tf
            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("""GPU""" ) )
        flax_version = jax_version = jaxlib_version = """not installed"""
        jax_backend = """NA"""
        if is_flax_available():
            import flax
            import jax
            import jaxlib
            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform
        info = {
            """`transformers` version""": version,
            """Platform""": platform.platform(),
            """Python version""": platform.python_version(),
            """Huggingface_hub version""": huggingface_hub.__version__,
            """Safetensors version""": F'''{safetensors_version}''',
            """Accelerate version""": F'''{accelerate_version}''',
            """Accelerate config""": F'''{accelerate_config_str}''',
            """PyTorch version (GPU?)""": F'''{pt_version} ({pt_cuda_available})''',
            """Tensorflow version (GPU?)""": F'''{tf_version} ({tf_cuda_available})''',
            """Flax version (CPU?/GPU?/TPU?)""": F'''{flax_version} ({jax_backend})''',
            """Jax version""": F'''{jax_version}''',
            """JaxLib version""": F'''{jaxlib_version}''',
            """Using GPU in script?""": """<fill in>""",
            """Using distributed or parallel set-up in script?""": """<fill in>""",
        }
        print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" )
        print(self.format_dict(info ) )
        return info
    @staticmethod
    def format_dict( d ) -> str:
        return "\n".join([F'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
| 83 | 0 |
def harmonic_series(n_term: str) -> list:
    if n_term == "":
        return []
    series = []
    for temp in range(int(n_term )):
        series.append(F'''1/{temp + 1}''' if series else """1""")
    return series


if __name__ == "__main__":
    nth_term = input('Enter the last number (nth term) of the Harmonic Series')
    print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n')
    print(harmonic_series(nth_term))
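    # Added example: the first five terms of the series.
    print(harmonic_series("5"))  # ['1', '1/2', '1/3', '1/4', '1/5']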
| 718 | import colorsys
from PIL import Image # type: ignore
def get_distance(x: float , y: float , max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance , 1 , 1))


def get_image(image_width: int = 800 , image_height: int = 600 , figure_center_x: float = -0.6 , figure_center_y: float = 0 , figure_width: float = 3.2 , max_step: int = 50 , use_distance_color_coding: bool = True , ) -> Image.Image:
    img = Image.new("""RGB""" , (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x , figure_y , max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
_SCREAMING_SNAKE_CASE = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 83 | 0 |
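The escape-time loop above tracks the real and imaginary parts by hand. For illustration, an equivalent sketch using Python's built-in complex type (function name and defaults are mine, not from the snippet):

def escape_fraction(x: float, y: float, max_step: int = 50) -> float:
    c = complex(x, y)
    z = 0j
    for step in range(max_step):
        z = z * z + c
        # |z| > 2 guarantees divergence, i.e. |z|**2 > 4
        if z.real * z.real + z.imag * z.imag > 4:
            break
    return step / (max_step - 1)

assert escape_fraction(0.0, 0.0) == 1.0  # the origin never diverges
assert escape_fraction(2.0, 2.0) < 0.1   # far points escape on the first step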
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class a ( __lowerCamelCase ):
"""simple docstring"""
lowerCamelCase :Optional[int] = ['''image_processor''', '''tokenizer''']
lowerCamelCase :List[Any] = '''BlipImageProcessor'''
lowerCamelCase :Union[str, Any] = '''AutoTokenizer'''
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]:
super().__init__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# add QFormer tokenizer
_A = qformer_tokenizer
def __call__( self , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = True , lowerCAmelCase_ = False , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = 0 , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = False , lowerCAmelCase_ = False , lowerCAmelCase_ = False , lowerCAmelCase_ = False , lowerCAmelCase_ = False , lowerCAmelCase_ = True , lowerCAmelCase_ = None , **lowerCAmelCase_ , ) -> str:
if images is None and text is None:
raise ValueError("""You have to specify at least images or text.""" )
_A = BatchFeature()
if text is not None:
_A = self.tokenizer(
text=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , return_overflowing_tokens=SCREAMING_SNAKE_CASE_ , return_special_tokens_mask=SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , return_length=SCREAMING_SNAKE_CASE_ , verbose=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
encoding.update(SCREAMING_SNAKE_CASE_ )
_A = self.qformer_tokenizer(
text=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , return_overflowing_tokens=SCREAMING_SNAKE_CASE_ , return_special_tokens_mask=SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , return_length=SCREAMING_SNAKE_CASE_ , verbose=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
_A = qformer_text_encoding.pop("""input_ids""" )
_A = qformer_text_encoding.pop("""attention_mask""" )
if images is not None:
_A = self.image_processor(SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ )
encoding.update(SCREAMING_SNAKE_CASE_ )
return encoding
def UpperCAmelCase ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> int:
return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> str:
return self.tokenizer.decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def UpperCAmelCase ( self ) -> Any:
_A = self.tokenizer.model_input_names
_A = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def UpperCAmelCase ( self , lowerCAmelCase_ , **lowerCAmelCase_ ) -> Any:
if os.path.isfile(SCREAMING_SNAKE_CASE_ ):
raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
_A = os.path.join(SCREAMING_SNAKE_CASE_ , """qformer_tokenizer""" )
self.qformer_tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ )
return super().save_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
@classmethod
def UpperCAmelCase ( cls , lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[Any]:
_A = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ , subfolder="""qformer_tokenizer""" )
_A = cls._get_arguments_from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
args.append(SCREAMING_SNAKE_CASE_ )
return cls(*SCREAMING_SNAKE_CASE_ )
| 719 | import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
_SCREAMING_SNAKE_CASE = datasets.logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n'
_SCREAMING_SNAKE_CASE = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n'
_SCREAMING_SNAKE_CASE = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n'
def snake_case ( snake_case__ :Optional[Any] , snake_case__ :str , snake_case__ :List[str]=False , snake_case__ :Dict=False , snake_case__ :Any=True , snake_case__ :List[str]=False , snake_case__ :Optional[Any]="dummy_doc") -> List[Any]:
_A = {doc: key_lines}
_A = {doc: sys_lines}
_A = {}
_A = 0
_A = 0
_A = 0
_A = 0
_A = 0
_A = 0
_A , _A = reader.get_doc_mentions(snake_case__ , key_doc_lines[doc] , snake_case__)
key_singletons_num += singletons_num
if NP_only or min_span:
_A = reader.set_annotated_parse_trees(snake_case__ , key_doc_lines[doc] , snake_case__ , snake_case__)
_A , _A = reader.get_doc_mentions(snake_case__ , sys_doc_lines[doc] , snake_case__)
sys_singletons_num += singletons_num
if NP_only or min_span:
_A = reader.set_annotated_parse_trees(snake_case__ , key_doc_lines[doc] , snake_case__ , snake_case__)
if remove_nested:
_A , _A = reader.remove_nested_coref_mentions(snake_case__ , snake_case__)
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
_A , _A = reader.remove_nested_coref_mentions(snake_case__ , snake_case__)
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
_A = reader.get_mention_assignments(snake_case__ , snake_case__)
_A = reader.get_mention_assignments(snake_case__ , snake_case__)
_A = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"""Number of removed nested coreferring mentions in the key """
F'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''')
logger.info(
"""Number of resulting singleton clusters in the key """
F'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''')
if not keep_singletons:
logger.info(
F'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '''
"""files, respectively""")
return doc_coref_infos
def snake_case ( snake_case__ :Tuple , snake_case__ :Tuple , snake_case__ :Dict , snake_case__ :Dict , snake_case__ :Tuple , snake_case__ :Dict , snake_case__ :Tuple) -> int:
_A = get_coref_infos(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__)
_A = {}
_A = 0
_A = 0
for name, metric in metrics:
_A , _A , _A = evaluator.evaluate_documents(snake_case__ , snake_case__ , beta=1)
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F'''{name}/recall''': recall, F'''{name}/precision''': precision, F'''{name}/f1''': fa})
logger.info(
name.ljust(10) , F'''Recall: {recall * 100:.2f}''' , F''' Precision: {precision * 100:.2f}''' , F''' F1: {fa * 100:.2f}''' , )
if conll_subparts_num == 3:
_A = (conll / 3) * 100
logger.info(F'''CoNLL score: {conll:.2f}''')
output_scores.update({"""conll_score""": conll})
return output_scores
def snake_case ( snake_case__ :Union[str, Any]) -> List[Any]:
_A = False
for line in key_lines:
if not line.startswith("""#"""):
if len(line.split()) > 6:
_A = line.split()[5]
if not parse_col == "-":
_A = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Sequence(datasets.Value("""string""" ) ),
} ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[
"""https://github.com/ns-moosavi/coval""",
"""https://www.aclweb.org/anthology/P16-1060""",
"""http://www.conll.cemantix.org/2012/data.html""",
] , )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False ) -> Union[str, Any]:
_A = [
("""mentions""", evaluator.mentions),
("""muc""", evaluator.muc),
("""bcub""", evaluator.b_cubed),
("""ceafe""", evaluator.ceafe),
("""lea""", evaluator.lea),
]
if min_span:
_A = util.check_gold_parse_annotation(lowerCAmelCase_ )
if not has_gold_parse:
raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
_A = evaluate(
key_lines=lowerCAmelCase_ , sys_lines=lowerCAmelCase_ , metrics=lowerCAmelCase_ , NP_only=lowerCAmelCase_ , remove_nested=lowerCAmelCase_ , keep_singletons=lowerCAmelCase_ , min_span=lowerCAmelCase_ , )
return score
| 83 | 0 |
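For reference, the conll_score assembled by the metric above is simply the mean of the MUC, B-cubed and CEAFe F1 values scaled to a percentage. A minimal sketch with made-up F1 numbers (the real values come from evaluator.evaluate_documents):

def conll_score(muc_f1: float, bcub_f1: float, ceafe_f1: float) -> float:
    # average the three F1 values and express the result as a percentage
    return (muc_f1 + bcub_f1 + ceafe_f1) / 3 * 100

print(conll_score(0.80, 0.75, 0.70))  # 75.0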
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def snake_case ( snake_case__ :Optional[Any]) -> Optional[Any]:
print("""Loading config file...""")
def flatten_yaml_as_dict(snake_case__ :Tuple , snake_case__ :Union[str, Any]="" , snake_case__ :Dict="."):
_A = []
for k, v in d.items():
_A = parent_key + sep + k if parent_key else k
if isinstance(snake_case__ , collections.abc.MutableMapping):
items.extend(flatten_yaml_as_dict(snake_case__ , snake_case__ , sep=snake_case__).items())
else:
items.append((new_key, v))
return dict(snake_case__)
_A = argparse.Namespace()
with open(snake_case__ , """r""") as yaml_file:
try:
_A = yaml.load(snake_case__ , Loader=yaml.FullLoader)
_A = flatten_yaml_as_dict(snake_case__)
for k, v in flat_cfg.items():
setattr(snake_case__ , snake_case__ , snake_case__)
except yaml.YAMLError as exc:
logger.error("""Error while loading config file: {}. Error message: {}""".format(snake_case__ , str(snake_case__)))
return config
def snake_case ( snake_case__ :Any , snake_case__ :Dict) -> Dict:
_A = MobileViTVaConfig()
_A = False
# dataset
if task_name.startswith("""imagenet1k_"""):
_A = 1_000
if int(task_name.strip().split("""_""")[-1]) == 384:
_A = 384
else:
_A = 256
_A = "imagenet-1k-id2label.json"
elif task_name.startswith("""imagenet21k_to_1k_"""):
_A = 21_000
if int(task_name.strip().split("""_""")[-1]) == 384:
_A = 384
else:
_A = 256
_A = "imagenet-22k-id2label.json"
elif task_name.startswith("""ade20k_"""):
_A = 151
_A = 512
_A = "ade20k-id2label.json"
_A = True
elif task_name.startswith("""voc_"""):
_A = 21
_A = 512
_A = "pascal-voc-id2label.json"
_A = True
# orig_config
_A = load_orig_config_file(snake_case__)
assert getattr(snake_case__ , """model.classification.name""" , -1) == "mobilevit_v2", "Invalid model"
_A = getattr(snake_case__ , """model.classification.mitv2.width_multiplier""" , 1.0)
assert (
getattr(snake_case__ , """model.classification.mitv2.attn_norm_layer""" , -1) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
_A = getattr(snake_case__ , """model.classification.activation.name""" , """swish""")
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
_A = getattr(snake_case__ , """model.segmentation.output_stride""" , 16)
if "_deeplabv3" in task_name:
_A = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_rates""" , [12, 24, 36])
_A = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_out_channels""" , 512)
_A = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_dropout""" , 0.1)
# id2label
_A = "huggingface/label-files"
_A = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="""dataset""") , """r"""))
_A = {int(snake_case__): v for k, v in idalabel.items()}
_A = idalabel
_A = {v: k for k, v in idalabel.items()}
return config
def snake_case ( snake_case__ :str , snake_case__ :Dict , snake_case__ :Union[str, Any]) -> Optional[int]:
_A = dct.pop(snake_case__)
_A = val
def snake_case ( snake_case__ :Tuple , snake_case__ :int=False) -> Dict:
if base_model:
_A = ""
else:
_A = "mobilevitv2."
_A = []
for k in state_dict.keys():
if k[:8] == "encoder.":
_A = k[8:]
else:
_A = k
if ".block." in k:
_A = k_new.replace(""".block.""" , """.""")
if ".conv." in k:
_A = k_new.replace(""".conv.""" , """.convolution.""")
if ".norm." in k:
_A = k_new.replace(""".norm.""" , """.normalization.""")
if "conv_1." in k:
_A = k_new.replace("""conv_1.""" , F'''{model_prefix}conv_stem.''')
for i in [1, 2]:
if F'''layer_{i}.''' in k:
_A = k_new.replace(F'''layer_{i}.''' , F'''{model_prefix}encoder.layer.{i-1}.layer.''')
if ".exp_1x1." in k:
_A = k_new.replace(""".exp_1x1.""" , """.expand_1x1.""")
if ".red_1x1." in k:
_A = k_new.replace(""".red_1x1.""" , """.reduce_1x1.""")
for i in [3, 4, 5]:
if F'''layer_{i}.0.''' in k:
_A = k_new.replace(F'''layer_{i}.0.''' , F'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''')
if F'''layer_{i}.1.local_rep.0.''' in k:
_A = k_new.replace(F'''layer_{i}.1.local_rep.0.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''')
if F'''layer_{i}.1.local_rep.1.''' in k:
_A = k_new.replace(F'''layer_{i}.1.local_rep.1.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''')
for i in [3, 4, 5]:
if i == 3:
_A = [0, 1]
elif i == 4:
_A = [0, 1, 2, 3]
elif i == 5:
_A = [0, 1, 2]
for j in j_in:
if F'''layer_{i}.1.global_rep.{j}.''' in k:
_A = k_new.replace(
F'''layer_{i}.1.global_rep.{j}.''' , F'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''')
if F'''layer_{i}.1.global_rep.{j+1}.''' in k:
_A = k_new.replace(
F'''layer_{i}.1.global_rep.{j+1}.''' , F'''{model_prefix}encoder.layer.{i-1}.layernorm.''')
if F'''layer_{i}.1.conv_proj.''' in k:
_A = k_new.replace(F'''layer_{i}.1.conv_proj.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_projection.''')
if "pre_norm_attn.0." in k:
_A = k_new.replace("""pre_norm_attn.0.""" , """layernorm_before.""")
if "pre_norm_attn.1." in k:
_A = k_new.replace("""pre_norm_attn.1.""" , """attention.""")
if "pre_norm_ffn.0." in k:
_A = k_new.replace("""pre_norm_ffn.0.""" , """layernorm_after.""")
if "pre_norm_ffn.1." in k:
_A = k_new.replace("""pre_norm_ffn.1.""" , """ffn.conv1.""")
if "pre_norm_ffn.3." in k:
_A = k_new.replace("""pre_norm_ffn.3.""" , """ffn.conv2.""")
if "classifier.1." in k:
_A = k_new.replace("""classifier.1.""" , """classifier.""")
if "seg_head." in k:
_A = k_new.replace("""seg_head.""" , """segmentation_head.""")
if ".aspp_layer." in k:
_A = k_new.replace(""".aspp_layer.""" , """.""")
if ".aspp_pool." in k:
_A = k_new.replace(""".aspp_pool.""" , """.""")
rename_keys.append((k, k_new))
return rename_keys
def snake_case ( snake_case__ :int) -> Tuple:
_A = []
for k in state_dict.keys():
if k.startswith("""seg_head.aux_head."""):
keys_to_ignore.append(snake_case__)
for k in keys_to_ignore:
state_dict.pop(snake_case__ , snake_case__)
def snake_case ( ) -> Tuple:
_A = "http://images.cocodataset.org/val2017/000000039769.jpg"
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
_A = Image.open(requests.get(snake_case__ , stream=snake_case__).raw)
return im
@torch.no_grad()
def snake_case ( snake_case__ :Dict , snake_case__ :Optional[int] , snake_case__ :Any , snake_case__ :List[str]) -> Any:
_A = get_mobilevitva_config(snake_case__ , snake_case__)
# load original state_dict
_A = torch.load(snake_case__ , map_location="""cpu""")
# load huggingface model
if task_name.startswith("""ade20k_""") or task_name.startswith("""voc_"""):
_A = MobileViTVaForSemanticSegmentation(snake_case__).eval()
_A = False
else:
_A = MobileViTVaForImageClassification(snake_case__).eval()
_A = False
# remove and rename some keys of the loaded original model
_A = checkpoint
remove_unused_keys(snake_case__)
_A = create_rename_keys(snake_case__ , base_model=snake_case__)
for rename_key_src, rename_key_dest in rename_keys:
rename_key(snake_case__ , snake_case__ , snake_case__)
# load modified state_dict
model.load_state_dict(snake_case__)
# Check outputs on an image, prepared by MobileViTImageProcessor
_A = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32)
_A = image_processor(images=prepare_img() , return_tensors="""pt""")
_A = model(**snake_case__)
# verify classification model
if task_name.startswith("""imagenet"""):
_A = outputs.logits
_A = logits.argmax(-1).item()
print("""Predicted class:""" , model.config.idalabel[predicted_class_idx])
if task_name.startswith("""imagenet1k_256""") and config.width_multiplier == 1.0:
# expected_logits for base variant
_A = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01])
assert torch.allclose(logits[0, :3] , snake_case__ , atol=1E-4)
Path(snake_case__).mkdir(exist_ok=snake_case__)
print(F'''Saving model {task_name} to {pytorch_dump_folder_path}''')
model.save_pretrained(snake_case__)
print(F'''Saving image processor to {pytorch_dump_folder_path}''')
image_processor.save_pretrained(snake_case__)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task',
default='imagenet1k_256',
type=str,
help=(
'Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '
'\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n '
),
choices=[
'imagenet1k_256',
'imagenet1k_384',
'imagenet21k_to_1k_256',
'imagenet21k_to_1k_384',
'ade20k_deeplabv3',
'voc_deeplabv3',
],
)
parser.add_argument(
'--orig_checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument('--orig_config_path', required=True, type=str, help='Path to the original config file.')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 720 | import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
_SCREAMING_SNAKE_CASE = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
_SCREAMING_SNAKE_CASE = {'facebook/blenderbot_small-90M': 512}
def snake_case ( snake_case__ :Tuple) -> str:
_A = set()
_A = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
_A = char
_A = set(snake_case__)
return pairs
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :List[Any] = VOCAB_FILES_NAMES
lowerCamelCase :Tuple = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase :List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase :int = ['''input_ids''', '''attention_mask''']
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="__start__" , lowerCAmelCase_="__end__" , lowerCAmelCase_="__unk__" , lowerCAmelCase_="__null__" , **lowerCAmelCase_ , ) -> int:
super().__init__(unk_token=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , **lowerCAmelCase_ )
with open(lowerCAmelCase_ , encoding="""utf-8""" ) as vocab_handle:
_A = json.load(lowerCAmelCase_ )
_A = {v: k for k, v in self.encoder.items()}
with open(lowerCAmelCase_ , encoding="""utf-8""" ) as merges_handle:
_A = merges_handle.read().split("""\n""" )[1:-1]
_A = [tuple(merge.split() ) for merge in merges]
_A = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
_A = {}
@property
def UpperCAmelCase ( self ) -> int:
return len(self.encoder )
def UpperCAmelCase ( self ) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> str:
if token in self.cache:
return self.cache[token]
_A = re.sub("""([.,!?()])""" , r""" \1""" , lowerCAmelCase_ )
_A = re.sub("""(')""" , r""" \1 """ , lowerCAmelCase_ )
_A = re.sub(r"""\s{2,}""" , """ """ , lowerCAmelCase_ )
if "\n" in token:
_A = token.replace("""\n""" , """ __newln__""" )
_A = token.split(""" """ )
_A = []
for token in tokens:
if not len(lowerCAmelCase_ ):
continue
_A = token.lower()
_A = tuple(lowerCAmelCase_ )
_A = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
_A = get_pairs(lowerCAmelCase_ )
if not pairs:
words.append(lowerCAmelCase_ )
continue
while True:
_A = min(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : self.bpe_ranks.get(lowerCAmelCase_ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
_A , _A = bigram
_A = []
_A = 0
while i < len(lowerCAmelCase_ ):
try:
_A = word.index(lowerCAmelCase_ , lowerCAmelCase_ )
new_word.extend(word[i:j] )
_A = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(lowerCAmelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_A = tuple(lowerCAmelCase_ )
_A = new_word
if len(lowerCAmelCase_ ) == 1:
break
else:
_A = get_pairs(lowerCAmelCase_ )
_A = """@@ """.join(lowerCAmelCase_ )
_A = word[:-4]
_A = word
words.append(lowerCAmelCase_ )
return " ".join(lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> List[str]:
_A = []
_A = re.findall(r"""\S+\n?""" , lowerCAmelCase_ )
for token in words:
split_tokens.extend(list(self.bpe(lowerCAmelCase_ ).split(""" """ ) ) )
return split_tokens
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> int:
_A = token.lower()
return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token ) )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> str:
return self.decoder.get(lowerCAmelCase_ , self.unk_token )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> str:
_A = """ """.join(lowerCAmelCase_ ).replace("""@@ """ , """""" ).strip()
return out_string
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_A = os.path.join(
lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_A = os.path.join(
lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase_ , ensure_ascii=lowerCAmelCase_ ) + """\n""" )
_A = 0
with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase_ : lowerCAmelCase_[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
""" Please check that the tokenizer is not corrupted!""" )
_A = token_index
writer.write(""" """.join(lowerCAmelCase_ ) + """\n""" )
index += 1
return vocab_file, merge_file
| 83 | 0 |
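The tokenizer above implements the standard greedy BPE merge loop. A stripped-down, self-contained sketch of that loop (the function name and the toy rank table are hypothetical):

def bpe_merge(word: tuple, bpe_ranks: dict) -> tuple:
    """Repeatedly merge the lowest-ranked adjacent pair until none is known."""
    while len(word) > 1:
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        # pick the pair that was learned earliest (lowest rank)
        bigram = min(pairs, key=lambda p: bpe_ranks.get(p, float("inf")))
        if bigram not in bpe_ranks:
            break
        first, second = bigram
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == bigram:
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)
    return word

print(bpe_merge(("l", "o", "w"), {("l", "o"): 0, ("lo", "w"): 1}))  # ('low',)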
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class a ( lowercase__ ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[str]:
_A = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(UpperCAmelCase__ , """tf_padding""" ) )
self.parent.assertTrue(hasattr(UpperCAmelCase__ , """depth_multiplier""" ) )
class a :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=3 , lowerCAmelCase_=32 , lowerCAmelCase_=0.25 , lowerCAmelCase_=8 , lowerCAmelCase_=8 , lowerCAmelCase_=6 , lowerCAmelCase_=32 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_="relu6" , lowerCAmelCase_=12_80 , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.02 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=10 , lowerCAmelCase_=None , ) -> Dict:
_A = parent
_A = batch_size
_A = num_channels
_A = image_size
_A = depth_multiplier
_A = depth_divisible_by
_A = min_depth
_A = expand_ratio
_A = tf_padding
_A = output_stride
_A = first_layer_is_expansion
_A = finegrained_output
_A = hidden_act
_A = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
_A = classifier_dropout_prob
_A = use_labels
_A = is_training
_A = num_labels
_A = initializer_range
_A = scope
def UpperCAmelCase ( self ) -> int:
_A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A = None
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.num_labels )
_A = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_A = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCAmelCase ( self ) -> List[Any]:
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
_A = MobileNetVaModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
_A = model(UpperCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[Any]:
_A = self.num_labels
_A = MobileNetVaForImageClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
_A = model(UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[int]:
_A = self.num_labels
_A = MobileNetVaForSemanticSegmentation(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
_A = model(UpperCAmelCase__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
_A = model(UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCAmelCase ( self ) -> List[str]:
_A = self.prepare_config_and_inputs()
_A = config_and_inputs
_A = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class a ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase :Tuple = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
lowerCamelCase :Optional[Any] = (
{
'''feature-extraction''': MobileNetVaModel,
'''image-classification''': MobileNetVaForImageClassification,
'''image-segmentation''': MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCamelCase :int = False
lowerCamelCase :int = False
lowerCamelCase :str = False
lowerCamelCase :int = False
def UpperCAmelCase ( self ) -> Dict:
_A = MobileNetVaModelTester(self )
_A = MobileNetVaConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ )
def UpperCAmelCase ( self ) -> List[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV2 does not use inputs_embeds""" )
def UpperCAmelCase ( self ) -> Optional[Any]:
pass
@unittest.skip(reason="""MobileNetV2 does not support input and output embeddings""" )
def UpperCAmelCase ( self ) -> Any:
pass
@unittest.skip(reason="""MobileNetV2 does not output attentions""" )
def UpperCAmelCase ( self ) -> List[str]:
pass
def UpperCAmelCase ( self ) -> str:
_A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(UpperCAmelCase__ )
_A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A = [*signature.parameters.keys()]
_A = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )
def UpperCAmelCase ( self ) -> List[Any]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def UpperCAmelCase ( self ) -> List[str]:
def check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_A = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
with torch.no_grad():
_A = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
_A = outputs.hidden_states
_A = 16
self.assertEqual(len(UpperCAmelCase__ ) , UpperCAmelCase__ )
_A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = True
check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A = True
check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def UpperCAmelCase ( self ) -> int:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__ )
def UpperCAmelCase ( self ) -> Any:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*UpperCAmelCase__ )
@slow
def UpperCAmelCase ( self ) -> Tuple:
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = MobileNetVaModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
def snake_case ( ) -> Tuple:
_A = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
return image
@require_torch
@require_vision
class a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase ( self ) -> List[Any]:
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v2_1.0_224""" ) if is_vision_available() else None
)
@slow
def UpperCAmelCase ( self ) -> Dict:
_A = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v2_1.0_224""" ).to(UpperCAmelCase__ )
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(images=UpperCAmelCase__ , return_tensors="""pt""" ).to(UpperCAmelCase__ )
# forward pass
with torch.no_grad():
_A = model(**UpperCAmelCase__ )
# verify the logits
_A = torch.Size((1, 10_01) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase__ )
_A = torch.tensor([0.2445, -1.1993, 0.1905] ).to(UpperCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1E-4 ) )
@slow
def UpperCAmelCase ( self ) -> Tuple:
_A = MobileNetVaForSemanticSegmentation.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
_A = model.to(UpperCAmelCase__ )
_A = MobileNetVaImageProcessor.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
_A = prepare_img()
_A = image_processor(images=UpperCAmelCase__ , return_tensors="""pt""" ).to(UpperCAmelCase__ )
# forward pass
with torch.no_grad():
_A = model(**UpperCAmelCase__ )
_A = outputs.logits
# verify the logits
_A = torch.Size((1, 21, 65, 65) )
self.assertEqual(logits.shape , UpperCAmelCase__ )
_A = torch.tensor(
[
[[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
[[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
[[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
] , device=UpperCAmelCase__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCAmelCase__ , atol=1E-4 ) )
| 721 | # fmt: off
_SCREAMING_SNAKE_CASE = {
'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.', 'G': '--.',
'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..', 'M': '--', 'N': '-.',
'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.', 'S': '...', 'T': '-', 'U': '..-',
'V': '...-', 'W': '.--', 'X': '-..-', 'Y': '-.--', 'Z': '--..', '1': '.----',
'2': '..---', '3': '...--', '4': '....-', '5': '.....', '6': '-....', '7': '--...',
'8': '---..', '9': '----.', '0': '-----', '&': '.-...', '@': '.--.-.',
':': '---...', ',': '--..--', '.': '.-.-.-', '\'': '.----.', '"': '.-..-.',
'?': '..--..', '/': '-..-.', '=': '-...-', '+': '.-.-.', '-': '-....-',
'(': '-.--.', ')': '-.--.-', '!': '-.-.--', ' ': '/'
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
_SCREAMING_SNAKE_CASE = {value: key for key, value in MORSE_CODE_DICT.items()}
def snake_case ( snake_case__ :str) -> str:
return " ".join(MORSE_CODE_DICT[char] for char in message.upper())
def snake_case ( snake_case__ :str) -> str:
return "".join(REVERSE_DICT[char] for char in message.split())
def snake_case ( ) -> None:
_A = """Morse code here!"""
print(snake_case__)
_A = encrypt(snake_case__)
print(snake_case__)
_A = decrypt(snake_case__)
print(snake_case__)
if __name__ == "__main__":
main()
| 83 | 0 |
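The Morse helpers above are a table lookup in each direction. A self-contained round-trip sketch with a three-entry toy table (the full dict above covers the whole ITU alphabet):

TOY_TABLE = {"S": "...", "O": "---", " ": "/"}
TOY_REVERSE = {v: k for k, v in TOY_TABLE.items()}

def to_morse(message: str) -> str:
    return " ".join(TOY_TABLE[char] for char in message.upper())

def from_morse(code: str) -> str:
    return "".join(TOY_REVERSE[token] for token in code.split())

assert to_morse("SOS") == "... --- ..."
assert from_morse(to_morse("so s")) == "SO S"  # round trip upper-cases the input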
'''simple docstring'''
from __future__ import annotations
def snake_case ( snake_case__ :list) -> bool:
if len(snake_case__) < 2:
raise ValueError("""Monogons and Digons are not polygons in the Euclidean space""")
if any(i <= 0 for i in snake_case__):
raise ValueError("""All values must be greater than 0""")
_A = snake_case__.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
| 700 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE = {
'configuration_jukebox': [
'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP',
'JukeboxConfig',
'JukeboxPriorConfig',
'JukeboxVQVAEConfig',
],
'tokenization_jukebox': ['JukeboxTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'JukeboxModel',
'JukeboxPreTrainedModel',
'JukeboxVQVAE',
'JukeboxPrior',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 83 | 0 |
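The comparison at the heart of the polygon check above is the generalized polygon inequality: the longest side must be strictly shorter than the sum of all the others. A cleaned-up sketch with sanity checks (the function name is mine):

def can_form_polygon(sides: list) -> bool:
    ordered = sorted(sides)  # sort a copy so the caller's list is untouched
    return ordered[-1] < sum(ordered[:-1])

assert can_form_polygon([3, 4, 5]) is True    # a valid triangle
assert can_form_polygon([1, 2, 10]) is False  # 10 >= 1 + 2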
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class a ( unittest.TestCase ):
"""simple docstring"""
@parameterized.expand([(None,), ("""foo.json""",)] )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Optional[int]:
_A = GenerationConfig(
do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCamelCase , config_name=__lowerCamelCase )
_A = GenerationConfig.from_pretrained(__lowerCamelCase , config_name=__lowerCamelCase )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , __lowerCamelCase )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , __lowerCamelCase )
def UpperCAmelCase ( self ) -> List[str]:
_A = AutoConfig.from_pretrained("""gpt2""" )
_A = GenerationConfig.from_model_config(__lowerCamelCase )
_A = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def UpperCAmelCase ( self ) -> int:
_A = GenerationConfig()
_A = {
'''max_new_tokens''': 10_24,
'''foo''': '''bar''',
}
_A = copy.deepcopy(__lowerCamelCase )
_A = generation_config.update(**__lowerCamelCase )
# update_kwargs was not modified (no side effects)
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 10_24 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(__lowerCamelCase , {"""foo""": """bar"""} )
def UpperCAmelCase ( self ) -> Union[str, Any]:
_A = GenerationConfig()
_A = '''bar'''
with tempfile.TemporaryDirectory("""test-generation-config""" ) as tmp_dir:
generation_config.save_pretrained(__lowerCamelCase )
_A = GenerationConfig.from_pretrained(__lowerCamelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , """bar""" )
_A = GenerationConfig.from_model_config(__lowerCamelCase )
assert not hasattr(__lowerCamelCase , """foo""" ) # no new kwargs should be initialized if from config
def UpperCAmelCase ( self ) -> Optional[Any]:
_A = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , __lowerCamelCase )
self.assertEqual(default_config.num_beams , 1 )
_A = GenerationConfig(
do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , __lowerCamelCase )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCamelCase )
_A = GenerationConfig.from_pretrained(__lowerCamelCase , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , __lowerCamelCase )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class a ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def UpperCAmelCase ( cls ) -> int:
_A = TOKEN
HfFolder.save_token(__lowerCamelCase )
@classmethod
def UpperCAmelCase ( cls ) -> List[str]:
try:
delete_repo(token=cls._token , repo_id="""test-generation-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-generation-config-org""" )
except HTTPError:
pass
def UpperCAmelCase ( self ) -> str:
_A = GenerationConfig(
do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""test-generation-config""" , use_auth_token=self._token )
_A = GenerationConfig.from_pretrained(F'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-generation-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__lowerCamelCase , repo_id="""test-generation-config""" , push_to_hub=__lowerCamelCase , use_auth_token=self._token )
_A = GenerationConfig.from_pretrained(F'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase ) )
def UpperCAmelCase ( self ) -> Dict:
_A = GenerationConfig(
do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""valid_org/test-generation-config-org""" , use_auth_token=self._token )
_A = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-generation-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__lowerCamelCase , repo_id="""valid_org/test-generation-config-org""" , push_to_hub=__lowerCamelCase , use_auth_token=self._token )
_A = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase ) )
| 701 | # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :Tuple = '''philschmid/bart-large-cnn-samsum'''
lowerCamelCase :Tuple = (
'''This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '''
'''and returns a summary of the text.'''
)
lowerCamelCase :List[Any] = '''summarizer'''
lowerCamelCase :List[str] = AutoTokenizer
lowerCamelCase :Dict = AutoModelForSeqaSeqLM
lowerCamelCase :int = ['''text''']
lowerCamelCase :List[Any] = ['''text''']
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> List[Any]:
return self.pre_processor(lowerCAmelCase_ , return_tensors="""pt""" , truncation=lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Tuple:
return self.model.generate(**lowerCAmelCase_ )[0]
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Union[str, Any]:
return self.pre_processor.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ )
| 83 | 0 |
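The tests in this row exercise a plain save/load round trip on GenerationConfig. A minimal usage sketch of that flow, assuming transformers is installed (values mirror the test):

import tempfile

from transformers import GenerationConfig

config = GenerationConfig(do_sample=True, temperature=0.7)
with tempfile.TemporaryDirectory() as tmp_dir:
    config.save_pretrained(tmp_dir)
    loaded = GenerationConfig.from_pretrained(tmp_dir)

assert loaded.temperature == 0.7  # explicitly set value survives the round trip
assert loaded.top_k == 50         # unspecified fields keep their defaults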
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"vocab_file": "vocab.json",
"tokenizer_config_file": "tokenizer_config.json",
"merges_file": "merges.txt",
}
_SCREAMING_SNAKE_CASE = {
"vocab_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
),
},
"tokenizer_config_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
),
},
"merges_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
),
},
}
_SCREAMING_SNAKE_CASE = "</w>"
_SCREAMING_SNAKE_CASE = "@@ "
def snake_case ( snake_case__ :Any) -> int:
_A = set()
_A = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
_A = char
return pairs
# Speech2Text2 has no max input length
_SCREAMING_SNAKE_CASE = {"facebook/s2t-wav2vec2-large-en-de": 1_024}
class a ( UpperCAmelCase_ ):
"""simple docstring"""
lowerCamelCase :int = VOCAB_FILES_NAMES
lowerCamelCase :Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase :Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase :Tuple = ['''input_ids''', '''attention_mask''']
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_="<s>" , lowerCAmelCase_="<pad>" , lowerCAmelCase_="</s>" , lowerCAmelCase_="<unk>" , lowerCAmelCase_=False , lowerCAmelCase_=None , **lowerCAmelCase_ , ) -> str:
super().__init__(
unk_token=_lowercase , bos_token=_lowercase , eos_token=_lowercase , pad_token=_lowercase , do_lower_case=_lowercase , **_lowercase , )
_A = do_lower_case
with open(_lowercase , encoding="""utf-8""" ) as vocab_handle:
_A = json.load(_lowercase )
_A = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(F'''No merges files provided. {self.__class__.__name__} can only be used for decoding.''' )
_A = None
_A = None
else:
with open(_lowercase , encoding="""utf-8""" ) as merges_handle:
_A = merges_handle.read().split("""\n""" )[:-1]
_A = [tuple(merge.split()[:2] ) for merge in merges]
_A = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
_A = {}
@property
def UpperCAmelCase ( self ) -> Tuple:
return len(self.decoder )
def UpperCAmelCase ( self ) -> Union[str, Any]:
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> List[Any]:
_A = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
_A = get_pairs(_lowercase )
if not pairs:
return token
while True:
_A = min(_lowercase , key=lambda lowerCAmelCase_ : self.bpe_ranks.get(_lowercase , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
_A , _A = bigram
_A = []
_A = 0
while i < len(_lowercase ):
try:
_A = word.index(_lowercase , _lowercase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_A = j
if word[i] == first and i < len(_lowercase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_A = tuple(_lowercase )
_A = new_word
if len(_lowercase ) == 1:
break
else:
_A = get_pairs(_lowercase )
_A = """ """.join(_lowercase )
if word == "\n " + BPE_TOKEN_MERGES:
_A = """\n""" + BPE_TOKEN_MERGES
if word.endswith(_lowercase ):
_A = word.replace(_lowercase , """""" )
_A = word.replace(""" """ , _lowercase )
_A = word
return word
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Union[str, Any]:
if self.bpe_ranks is None:
raise ValueError(
"""This tokenizer was instantiated without a `merges.txt` file, so"""
""" that it can only be used for decoding, not for encoding."""
"""Make sure to provide `merges.txt` file at instantiation to enable """
"""encoding.""" )
if self.do_lower_case:
_A = text.lower()
_A = text.split()
_A = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(_lowercase ).split(""" """ ) ) )
return split_tokens
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> List[str]:
return self.encoder.get(_lowercase , self.encoder.get(self.unk_token ) )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> List[str]:
_A = self.decoder.get(_lowercase , self.unk_token )
return result
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Any:
_A = """ """.join(_lowercase )
# make sure @@ tokens are concatenated
_A = """""".join(string.split(_lowercase ) )
return string
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> Tuple:
if not os.path.isdir(_lowercase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_A = os.path.join(
_lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_A = os.path.join(
_lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(_lowercase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_lowercase , ensure_ascii=_lowercase ) + """\n""" )
_A = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(_lowercase , """w""" , encoding="""utf-8""" ) as writer:
        for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.'''
""" Please check that the tokenizer is not corrupted!""" )
_A = token_index
writer.write(""" """.join(_lowercase ) + """\n""" )
index += 1
return (vocab_file, merges_file)
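# Hedged usage sketch (this file mirrors transformers' Speech2Text2Tokenizer, so the
# class name is assumed; checkpoint name taken from the map above, output illustrative):
#   tokenizer = Speech2Text2Tokenizer.from_pretrained("facebook/s2t-wav2vec2-large-en-de")
#   tokenizer.convert_tokens_to_string(["hallo@@", "welt"])  # -> "hallowelt"
# " ".join first gives "hallo@@ welt"; splitting on BPE_TOKEN_MERGES ("@@ ") and
# re-joining glues the continuation piece onto the following token.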
| 702 | import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
_SCREAMING_SNAKE_CASE = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def snake_case ( snake_case__ :Union[str, Any]) -> Dict:
_A = torch.load(snake_case__ , map_location="""cpu""")
return sd
def snake_case ( snake_case__ :List[str] , snake_case__ :Optional[Any] , snake_case__ :int=rename_keys_prefix) -> Optional[Any]:
_A = OrderedDict()
_A = torch.arange(config.max_position_embeddings).expand((1, -1))
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
_A = key
for name_pair in rename_keys_prefix:
_A = new_key.replace(name_pair[0] , name_pair[1])
_A = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`, but was added separately
_A = new_d["""cls.predictions.bias"""]
return new_d
@torch.no_grad()
def snake_case ( snake_case__ :Tuple , snake_case__ :Tuple) -> int:
assert (
checkpoint_path.split("""/""")[-1] in ACCEPTABLE_CHECKPOINTS
), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
# Get Config
if "pre" in checkpoint_path:
_A = """pretraining"""
if "vcr" in checkpoint_path:
_A = {"""visual_embedding_dim""": 512}
elif "vqa_advanced" in checkpoint_path:
_A = {"""visual_embedding_dim""": 2_048}
elif "vqa" in checkpoint_path:
_A = {"""visual_embedding_dim""": 2_048}
elif "nlvr" in checkpoint_path:
_A = {"""visual_embedding_dim""": 1_024}
else:
raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''')
else:
if "vcr" in checkpoint_path:
_A = {"""visual_embedding_dim""": 512}
_A = """multichoice"""
elif "vqa_advanced" in checkpoint_path:
_A = {"""visual_embedding_dim""": 2_048}
_A = """vqa_advanced"""
elif "vqa" in checkpoint_path:
_A = {"""visual_embedding_dim""": 2_048, """num_labels""": 3_129}
_A = """vqa"""
elif "nlvr" in checkpoint_path:
_A = {
"""visual_embedding_dim""": 1_024,
"""num_labels""": 2,
}
_A = """nlvr"""
_A = VisualBertConfig(**snake_case__)
# Load State Dict
_A = load_state_dict(snake_case__)
_A = get_new_dict(snake_case__ , snake_case__)
if model_type == "pretraining":
_A = VisualBertForPreTraining(snake_case__)
elif model_type == "vqa":
_A = VisualBertForQuestionAnswering(snake_case__)
elif model_type == "nlvr":
_A = VisualBertForVisualReasoning(snake_case__)
elif model_type == "multichoice":
_A = VisualBertForMultipleChoice(snake_case__)
model.load_state_dict(snake_case__)
# Save Checkpoints
Path(snake_case__).mkdir(exist_ok=snake_case__)
model.save_pretrained(snake_case__)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
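# Hedged CLI sketch (the script file name is assumed; the output directory is
# hypothetical). The checkpoint file name must be one of ACCEPTABLE_CHECKPOINTS,
# since the model type and visual_embedding_dim are inferred from it:
#   python convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py \
#       vqa_pre_trained.th ./visualbert-vqa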
| 83 | 0 |
import doctest
import sys
import warnings
from os.path import abspath, dirname, join

import _pytest

from transformers.testing_utils import HfDoctestModule, HfDocTestParser

# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5; treat that as success.
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
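# Hedged illustration of the IGNORE_RESULT flag registered above: appending the
# directive to a doctest line makes CustomOutputChecker accept any output for it.
#
#   >>> import datetime
#   >>> datetime.datetime.now()   # doctest: +IGNORE_RESULT
#   datetime.datetime(2020, 1, 1, 0, 0)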
| 703 | from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class a ( __lowerCAmelCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[str]:
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def UpperCAmelCase ( self ) -> Optional[int]:
_A = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]}
return Dataset.from_dict(lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
_A = self._create_example_records()
_A = Dataset.from_list(lowerCAmelCase_ )
self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] )
for i, r in enumerate(lowerCAmelCase_ ):
self.assertDictEqual(lowerCAmelCase_ , example_records[i] )
def UpperCAmelCase ( self ) -> str:
_A = self._create_example_records()
_A = Dataset.from_list(lowerCAmelCase_ )
_A = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def UpperCAmelCase ( self ) -> Any: # checks what happens with missing columns
_A = [{"""col_1""": 1}, {"""col_2""": """x"""}]
_A = Dataset.from_list(lowerCAmelCase_ )
self.assertDictEqual(dset[0] , {"""col_1""": 1} )
self.assertDictEqual(dset[1] , {"""col_1""": None} ) # NB: first record is used for columns
def UpperCAmelCase ( self ) -> Tuple: # checks if the type can be inferred from the second record
_A = [{"""col_1""": []}, {"""col_1""": [1, 2]}]
_A = Dataset.from_list(lowerCAmelCase_ )
self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) )
def UpperCAmelCase ( self ) -> Any:
_A = Dataset.from_list([] )
self.assertEqual(len(lowerCAmelCase_ ) , 0 )
self.assertListEqual(dset.column_names , [] )
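# Quick sketch of the behaviour under test (values illustrative): Dataset.from_list
# infers the schema from the first record, so
#   Dataset.from_list([{"col_1": 1}, {"col_2": "x"}]).column_names == ["col_1"]
# and the second row is filled with None for the missing column.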
| 83 | 0 |
import re
# Function names below are reconstructed from behaviour (the dump elides them).
def split_input(str_: str) -> list:
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    string_split = split_input(str_)
    return "".join(["".join([char.capitalize() for char in sub_str]) for sub_str in string_split])


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [separator.join([char.upper() for char in sub_str]) for sub_str in string_split]
            )
        else:
            res_str = "".join(
                [separator.join([char.lower() for char in sub_str]) for sub_str in string_split]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "-")
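# Illustrative outputs for the reconstructed names above (doctest-style sketch):
#   to_pascal_case("one two 31235three4four")        -> "OneTwo31235three4four"
#   to_snake_case("one two 31235three4four", True)   -> "ONE_TWO_31235THREE4FOUR"
#   to_kebab_case("one two 31235three4four", False)  -> "one-two-31235three4four"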
if __name__ == "__main__":
__import__('doctest').testmod()
| 704 | def solution(limit: int = 1_000_000) -> int:
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))
if __name__ == "__main__":
print(F'''{solution() = }''')
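# Sanity sketch: this sums Euler's totient phi(n) for 2 <= n <= limit (the count of
# reduced proper fractions, Project Euler 72). For limit = 10 the totients
# 1 + 2 + 2 + 4 + 2 + 6 + 4 + 6 + 4 give solution(10) == 31.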
| 83 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class a ( metaclass=__lowercase ):
"""simple docstring"""
lowerCamelCase :Tuple = ['''transformers''', '''torch''', '''note_seq''']
def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> int:
requires_backends(self , ["""transformers""", """torch""", """note_seq"""] )
@classmethod
def UpperCAmelCase ( cls , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Optional[int]:
requires_backends(cls , ["""transformers""", """torch""", """note_seq"""] )
@classmethod
def UpperCAmelCase ( cls , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Optional[int]:
requires_backends(cls , ["""transformers""", """torch""", """note_seq"""] )
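# Pattern note: these placeholders let the parent library's top-level import succeed
# when optional backends are missing; requires_backends raises an ImportError naming
# the required packages ("transformers", "torch", "note_seq") only when the object
# is actually constructed or one of its classmethods is called.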
| 705 | import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class a ( __lowerCAmelCase ):
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=7 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=99 , lowerCAmelCase_=32 , lowerCAmelCase_=5 , lowerCAmelCase_=4 , lowerCAmelCase_=37 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=5_12 , lowerCAmelCase_=16 , lowerCAmelCase_=2 , lowerCAmelCase_=0.02 , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_="None" , lowerCAmelCase_=3 , lowerCAmelCase_=4 , lowerCAmelCase_=None , ) -> Union[str, Any]:
_A = parent
_A = batch_size
_A = seq_length
_A = is_training
_A = use_input_mask
_A = use_token_type_ids
_A = use_labels
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = type_vocab_size
_A = type_sequence_label_size
_A = initializer_range
_A = num_labels
_A = num_choices
_A = relative_attention
_A = position_biased_input
_A = pos_att_type
_A = scope
def UpperCAmelCase ( self ) -> Dict:
_A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A = None
if self.use_input_mask:
_A = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_A = None
if self.use_token_type_ids:
_A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_A = None
_A = None
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_A = ids_tensor([self.batch_size] , self.num_choices )
_A = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase ( self ) -> Optional[int]:
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Any:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]:
_A = DebertaVaModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0]
_A = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0]
_A = model(lowerCAmelCase_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
_A = DebertaVaForMaskedLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any:
_A = self.num_labels
_A = DebertaVaForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
_A = self.num_labels
_A = DebertaVaForTokenClassification(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[int]:
_A = DebertaVaForQuestionAnswering(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
_A = DebertaVaForMultipleChoice(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase ( self ) -> Optional[int]:
_A = self.prepare_config_and_inputs()
(
(
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) ,
) = config_and_inputs
_A = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class a ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase :int = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
lowerCamelCase :str = (
{
'''feature-extraction''': DebertaVaModel,
'''fill-mask''': DebertaVaForMaskedLM,
'''question-answering''': DebertaVaForQuestionAnswering,
'''text-classification''': DebertaVaForSequenceClassification,
'''token-classification''': DebertaVaForTokenClassification,
'''zero-shot''': DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase :str = True
lowerCamelCase :Union[str, Any] = False
lowerCamelCase :Optional[int] = False
lowerCamelCase :List[str] = False
lowerCamelCase :str = False
def UpperCAmelCase ( self ) -> Optional[int]:
_A = DebertaVaModelTester(self )
_A = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 )
def UpperCAmelCase ( self ) -> List[str]:
self.config_tester.run_common_tests()
def UpperCAmelCase ( self ) -> List[str]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Any:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> int:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Optional[int]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*lowerCAmelCase_ )
@slow
def UpperCAmelCase ( self ) -> Any:
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = DebertaVaModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class a ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip(reason="""Model not available yet""" )
def UpperCAmelCase ( self ) -> int:
pass
@slow
def UpperCAmelCase ( self ) -> Optional[Any]:
_A = DebertaVaModel.from_pretrained("""microsoft/deberta-v2-xlarge""" )
_A = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
_A = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0]
# compare the actual values for a slice.
_A = torch.tensor(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase_ , atol=1E-4 ) , F'''{output[:, 1:4, 1:4]}''' )
| 83 | 0 |
from ..utils import DummyObject, requires_backends
class a ( metaclass=__lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :Tuple = ['''speech''']
def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Optional[int]:
requires_backends(self , ["""speech"""] )
class a ( metaclass=__lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :List[Any] = ['''speech''']
def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[Any]:
requires_backends(self , ["""speech"""] )
| 706 | def nor_gate(input_1: int, input_2: int) -> int:
    return int(input_1 == input_2 == 0)
def main() -> None:
print("""Truth Table of NOR Gate:""")
print("""| Input 1 | Input 2 | Output |""")
print(F'''| 0 | 0 | {nor_gate(0 , 0)} |''')
print(F'''| 0 | 1 | {nor_gate(0 , 1)} |''')
print(F'''| 1 | 0 | {nor_gate(1 , 0)} |''')
print(F'''| 1 | 1 | {nor_gate(1 , 1)} |''')
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
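# Doctest-style sketch of the gate (NOR is 1 only when both inputs are 0):
#   >>> nor_gate(0, 0)
#   1
#   >>> nor_gate(1, 1)
#   0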
| 83 | 0 |
'''simple docstring'''
import math
def decimal_to_octal(num: int) -> str:
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"
def main() -> None:
print("""\n2 in octal is:""")
print(decimal_to_octal(2)) # = 2
print("""\n8 in octal is:""")
print(decimal_to_octal(8)) # = 10
print("""\n65 in octal is:""")
print(decimal_to_octal(65)) # = 101
print("""\n216 in octal is:""")
print(decimal_to_octal(216)) # = 330
print("""\n512 in octal is:""")
print(decimal_to_octal(512)) # = 1000
print("""\n""")
if __name__ == "__main__":
main()
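# Cross-check against the standard library (a sketch): oct(216) == "0o330", matching
# decimal_to_octal(216); the hand-rolled version exists to show the repeated
# divide-by-8 construction of the octal digits.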
| 707 | import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class a :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=sys.maxsize ) -> str:
_A = """bilinear"""
_A = max_size
_A = short_edge_length
def __call__( self , lowerCAmelCase_ ) -> Optional[Any]:
_A = []
for img in imgs:
_A , _A = img.shape[:2]
# later: provide list and randomly choose index for resize
_A = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
_A = size * 1.0 / min(lowerCAmelCase_ , lowerCAmelCase_ )
if h < w:
_A , _A = size, scale * w
else:
_A , _A = scale * h, size
if max(lowerCAmelCase_ , lowerCAmelCase_ ) > self.max_size:
_A = self.max_size * 1.0 / max(lowerCAmelCase_ , lowerCAmelCase_ )
_A = newh * scale
_A = neww * scale
_A = int(neww + 0.5 )
_A = int(newh + 0.5 )
            if img.dtype == np.uint8:
_A = Image.fromarray(lowerCAmelCase_ )
_A = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
_A = np.asarray(lowerCAmelCase_ )
else:
_A = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw
_A = nn.functional.interpolate(
lowerCAmelCase_ , (newh, neww) , mode=self.interp_method , align_corners=lowerCAmelCase_ ).squeeze(0 )
img_augs.append(lowerCAmelCase_ )
return img_augs
class a :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ ) -> List[Any]:
_A = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
_A = cfg.INPUT.FORMAT
_A = cfg.SIZE_DIVISIBILITY
_A = cfg.PAD_VALUE
_A = cfg.INPUT.MAX_SIZE_TEST
_A = cfg.MODEL.DEVICE
_A = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
_A = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
_A = lambda lowerCAmelCase_ : (x - self.pixel_mean) / self.pixel_std
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Tuple:
_A = tuple(max(lowerCAmelCase_ ) for s in zip(*[img.shape for img in images] ) )
_A = [im.shape[-2:] for im in images]
_A = [
nn.functional.pad(
lowerCAmelCase_ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(lowerCAmelCase_ , lowerCAmelCase_ )
]
return torch.stack(lowerCAmelCase_ ), torch.tensor(lowerCAmelCase_ )
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_=False ) -> int:
with torch.no_grad():
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_A = [images]
if single_image:
assert len(lowerCAmelCase_ ) == 1
for i in range(len(lowerCAmelCase_ ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(lowerCAmelCase_ , images.pop(lowerCAmelCase_ ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
lowerCAmelCase_ , torch.as_tensor(img_tensorize(images.pop(lowerCAmelCase_ ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
_A = torch.tensor([im.shape[:2] for im in images] )
_A = self.aug(lowerCAmelCase_ )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
_A = [self.normalizer(lowerCAmelCase_ ) for x in images]
# now pad them to do the following operations
_A , _A = self.pad(lowerCAmelCase_ )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
_A = torch.true_divide(lowerCAmelCase_ , lowerCAmelCase_ )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes
def _clip_box(tensor, box_size: Tuple[int, int]):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
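# Sketch of _clip_box semantics (in-place, boxes as (x1, y1, x2, y2)):
#   boxes = torch.tensor([[-5.0, 10.0, 900.0, 400.0]])
#   _clip_box(boxes, (480, 640))   # image is h=480, w=640 -> [[0., 10., 640., 400.]]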
| 83 | 0 |
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class a ( __A ):
"""simple docstring"""
lowerCamelCase :str = ["""image_processor"""]
lowerCamelCase :str = """SamImageProcessor"""
def __init__( self , lowerCAmelCase_ ) -> Union[str, Any]:
super().__init__(UpperCamelCase__ )
_A = self.image_processor
_A = -10
_A = self.image_processor.size['longest_edge']
def __call__( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_ = None , **lowerCAmelCase_ , ) -> List[str]:
_A = self.image_processor(
UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ , )
# pop arguments that are not used in the foward but used nevertheless
_A = encoding_image_processor['original_sizes']
if hasattr(UpperCamelCase__ , """numpy""" ): # Checks if Torch or TF tensor
_A = original_sizes.numpy()
_A = self._check_and_preprocess_points(
input_points=UpperCamelCase__ , input_labels=UpperCamelCase__ , input_boxes=UpperCamelCase__ , )
_A = self._normalize_and_convert(
UpperCamelCase__ , UpperCamelCase__ , input_points=UpperCamelCase__ , input_labels=UpperCamelCase__ , input_boxes=UpperCamelCase__ , return_tensors=UpperCamelCase__ , )
return encoding_image_processor
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_="pt" , ) -> Optional[int]:
if input_points is not None:
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
_A = [
self._normalize_coordinates(self.target_size , UpperCamelCase__ , original_sizes[0] ) for point in input_points
]
else:
_A = [
self._normalize_coordinates(self.target_size , UpperCamelCase__ , UpperCamelCase__ )
for point, original_size in zip(UpperCamelCase__ , UpperCamelCase__ )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
_A = self._pad_points_and_labels(UpperCamelCase__ , UpperCamelCase__ )
_A = np.array(UpperCamelCase__ )
if input_labels is not None:
_A = np.array(UpperCamelCase__ )
if input_boxes is not None:
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
_A = [
self._normalize_coordinates(self.target_size , UpperCamelCase__ , original_sizes[0] , is_bounding_box=UpperCamelCase__ )
for box in input_boxes
]
else:
_A = [
self._normalize_coordinates(self.target_size , UpperCamelCase__ , UpperCamelCase__ , is_bounding_box=UpperCamelCase__ )
for box, original_size in zip(UpperCamelCase__ , UpperCamelCase__ )
]
_A = np.array(UpperCamelCase__ )
if input_boxes is not None:
if return_tensors == "pt":
_A = torch.from_numpy(UpperCamelCase__ )
# boxes batch size of 1 by default
_A = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
_A = tf.convert_to_tensor(UpperCamelCase__ )
# boxes batch size of 1 by default
_A = tf.expand_dims(UpperCamelCase__ , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({"""input_boxes""": input_boxes} )
if input_points is not None:
if return_tensors == "pt":
_A = torch.from_numpy(UpperCamelCase__ )
# point batch size of 1 by default
_A = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
_A = tf.convert_to_tensor(UpperCamelCase__ )
# point batch size of 1 by default
_A = tf.expand_dims(UpperCamelCase__ , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({"""input_points""": input_points} )
if input_labels is not None:
if return_tensors == "pt":
_A = torch.from_numpy(UpperCamelCase__ )
# point batch size of 1 by default
_A = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
_A = tf.convert_to_tensor(UpperCamelCase__ )
# point batch size of 1 by default
_A = tf.expand_dims(UpperCamelCase__ , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({"""input_labels""": input_labels} )
return encoding_image_processor
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple:
_A = max([point.shape[0] for point in input_points] )
_A = []
for i, point in enumerate(UpperCamelCase__ ):
if point.shape[0] != expected_nb_points:
_A = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
_A = np.append(input_labels[i] , [self.point_pad_value] )
processed_input_points.append(UpperCamelCase__ )
_A = processed_input_points
return input_points, input_labels
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False ) -> Optional[Any]:
_A = original_size
_A = self.image_processor._get_preprocess_shape(UpperCamelCase__ , longest_edge=UpperCamelCase__ )
_A = deepcopy(UpperCamelCase__ ).astype(UpperCamelCase__ )
if is_bounding_box:
_A = coords.reshape(-1 , 2 , 2 )
_A = coords[..., 0] * (new_w / old_w)
_A = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
_A = coords.reshape(-1 , 4 )
return coords
def UpperCAmelCase ( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , ) -> Tuple:
if input_points is not None:
if hasattr(UpperCamelCase__ , """numpy""" ): # Checks for TF or Torch tensor
_A = input_points.numpy().tolist()
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or not isinstance(input_points[0] , UpperCamelCase__ ):
raise ValueError("""Input points must be a list of list of floating points.""" )
_A = [np.array(UpperCamelCase__ ) for input_point in input_points]
else:
_A = None
if input_labels is not None:
if hasattr(UpperCamelCase__ , """numpy""" ):
_A = input_labels.numpy().tolist()
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or not isinstance(input_labels[0] , UpperCamelCase__ ):
raise ValueError("""Input labels must be a list of list integers.""" )
_A = [np.array(UpperCamelCase__ ) for label in input_labels]
else:
_A = None
if input_boxes is not None:
if hasattr(UpperCamelCase__ , """numpy""" ):
_A = input_boxes.numpy().tolist()
if (
not isinstance(UpperCamelCase__ , UpperCamelCase__ )
or not isinstance(input_boxes[0] , UpperCamelCase__ )
or not isinstance(input_boxes[0][0] , UpperCamelCase__ )
):
raise ValueError("""Input boxes must be a list of list of list of floating points.""" )
            _A = [np.array(UpperCamelCase__ ).astype(np.float32 ) for box in input_boxes]
else:
_A = None
return input_points, input_labels, input_boxes
@property
def UpperCAmelCase ( self ) -> Optional[int]:
_A = self.image_processor.model_input_names
return list(dict.fromkeys(UpperCamelCase__ ) )
def UpperCAmelCase ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Any:
return self.image_processor.post_process_masks(*UpperCamelCase__ , **UpperCamelCase__ )
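# Hedged usage sketch (checkpoint name assumed; the point is (x, y) pixel coords):
#   processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
#   inputs = processor(images=raw_image, input_points=[[[450, 600]]], return_tensors="pt")
#   masks = processor.post_process_masks(
#       outputs.pred_masks, inputs["original_sizes"], inputs["reshaped_input_sizes"]
#   )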
| 708 | from collections import defaultdict
def dfs(start: int) -> int:
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret
def even_tree() -> None:
    dfs(1)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 10, 9
_SCREAMING_SNAKE_CASE = defaultdict(list)
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
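# For the sample 10-node tree above, the even-sized subtrees hang off nodes 3 and 6
# plus the root, so len(cuts) - 1 == 2 edges can be removed (HackerRank "Even Tree").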
| 83 | 0 |
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
_SCREAMING_SNAKE_CASE = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
_SCREAMING_SNAKE_CASE = tuple[int, int]
class Node:
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ) -> None:
_A = pos_x
_A = pos_y
_A = (pos_y, pos_x)
_A = goal_x
_A = goal_y
_A = g_cost
_A = parent
_A = self.calculate_heuristic()
_A = self.g_cost + self.h_cost
def UpperCAmelCase ( self ) -> float:
_A = self.pos_x - self.goal_x
_A = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(lowerCamelCase_ ) + abs(lowerCamelCase_ )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self , lowerCAmelCase_ ) -> bool:
return self.f_cost < other.f_cost
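# Note on the two heuristics: with HEURISTIC == 1 (Manhattan) and the 4-connected,
# unit-cost moves in `delta`, the estimate is admissible, so A* returns an optimal
# path; Euclidean distance (HEURISTIC == 0) is also admissible but weaker, so it
# typically expands more nodes before reaching the goal.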
class AStar:
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]:
_A = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , lowerCamelCase_ )
_A = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , lowerCamelCase_ )
_A = [self.start]
_A = []
_A = False
def UpperCAmelCase ( self ) -> list[TPosition]:
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
_A = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(lowerCamelCase_ )
self.closed_nodes.append(lowerCamelCase_ )
_A = self.get_successors(lowerCamelCase_ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(lowerCamelCase_ )
else:
# retrieve the best current path
_A = self.open_nodes.pop(self.open_nodes.index(lowerCamelCase_ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(lowerCamelCase_ )
else:
self.open_nodes.append(lowerCamelCase_ )
return [self.start.pos]
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> list[Node]:
_A = []
for action in delta:
_A = parent.pos_x + action[1]
_A = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowerCamelCase_ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
lowerCamelCase_ , lowerCamelCase_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , lowerCamelCase_ , ) )
return successors
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> list[TPosition]:
_A = node
_A = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
_A = current_node.parent
path.reverse()
return path
class BidirectionalAStar:
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> None:
_A = AStar(lowerCamelCase_ , lowerCamelCase_ )
_A = AStar(lowerCamelCase_ , lowerCamelCase_ )
_A = False
def UpperCAmelCase ( self ) -> list[TPosition]:
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
_A = self.fwd_astar.open_nodes.pop(0 )
_A = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
lowerCamelCase_ , lowerCamelCase_ )
self.fwd_astar.closed_nodes.append(lowerCamelCase_ )
self.bwd_astar.closed_nodes.append(lowerCamelCase_ )
_A = current_bwd_node
_A = current_fwd_node
_A = {
self.fwd_astar: self.fwd_astar.get_successors(lowerCamelCase_ ),
self.bwd_astar: self.bwd_astar.get_successors(lowerCamelCase_ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(lowerCamelCase_ )
else:
# retrieve the best current path
_A = astar.open_nodes.pop(
astar.open_nodes.index(lowerCamelCase_ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(lowerCamelCase_ )
else:
astar.open_nodes.append(lowerCamelCase_ )
return [self.fwd_astar.start.pos]
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> list[TPosition]:
_A = self.fwd_astar.retrace_path(lowerCamelCase_ )
_A = self.bwd_astar.retrace_path(lowerCamelCase_ )
bwd_path.pop()
bwd_path.reverse()
_A = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
_SCREAMING_SNAKE_CASE = (0, 0)
_SCREAMING_SNAKE_CASE = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
_SCREAMING_SNAKE_CASE = time.time()
_SCREAMING_SNAKE_CASE = AStar(init, goal)
_SCREAMING_SNAKE_CASE = a_star.search()
_SCREAMING_SNAKE_CASE = time.time() - start_time
print(F'''AStar execution time = {end_time:f} seconds''')
_SCREAMING_SNAKE_CASE = time.time()
_SCREAMING_SNAKE_CASE = BidirectionalAStar(init, goal)
_SCREAMING_SNAKE_CASE = time.time() - bd_start_time
print(F'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
| 709 | import heapq
def greedy_min_vertex_cover(graph: dict) -> set[int]:
    queue = []
    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
_SCREAMING_SNAKE_CASE = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
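# For the sample graph the greedy rule removes the highest-degree remaining vertex
# first (2, then 0, 1 and 4 as ranks are updated), so this prints {0, 1, 2, 4}.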
| 83 | 0 |
def solution(n: int = 4_000_000) -> int:
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(F'''{solution() = }''')
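# Sanity sketch (Project Euler 2): solution(10) == 10 (even terms 2 + 8), and the
# default 4_000_000 limit yields 4613732.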
| 710 | import math
import unittest
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
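# Why stepping by 6 from 5 suffices: every prime above 3 is congruent to 1 or 5
# (mod 6), since the other residues are divisible by 2 or 3; checking i and i + 2
# therefore covers all candidate factors up to sqrt(number).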
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[Any]:
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def UpperCAmelCase ( self ) -> Dict:
with self.assertRaises(lowerCAmelCase_ ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , """Zero doesn't have any positive factors, primes must have exactly two.""" , )
self.assertFalse(
is_prime(1 ) , """One only has 1 positive factor, primes must have exactly two.""" , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 83 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_SCREAMING_SNAKE_CASE = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ["GLPNFeatureExtractor"]
_SCREAMING_SNAKE_CASE = ["GLPNImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
"GLPNForDepthEstimation",
"GLPNLayer",
"GLPNModel",
"GLPNPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
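# Behavioural note (not extra API): the _LazyModule shim defers the torch- and
# vision-dependent imports until an attribute such as GLPNModel is first accessed,
# so `import transformers` stays cheap when GLPN is never used.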
| 711 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE = {'configuration_encoder_decoder': ['EncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['EncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['TFEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['FlaxEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 83 | 0 |
import os
import pytest
from attr import dataclass
_SCREAMING_SNAKE_CASE = 'us-east-1' # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}

    @property
    def metric_definitions(self):
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name(self) -> str:
        return f"{self.framework}-transfromers-test"

    @property
    def test_path(self) -> str:
        return f"./tests/sagemaker/scripts/{self.framework}"

    @property
    def image_uri(self) -> str:
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"


@pytest.fixture(scope="class")
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
| 712 | from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
@add_end_docstrings(__lowerCAmelCase )
class a ( __lowerCAmelCase ):
"""simple docstring"""
def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Optional[Any]:
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
self.check_model_type(lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ ) -> Tuple:
_A , _A = {}, {}
if padding is not None:
_A = padding
if truncation is not None:
_A = truncation
if top_k is not None:
_A = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ = None , **lowerCAmelCase_ ) -> Union[str, Any]:
if isinstance(lowerCAmelCase_ , (Image.Image, str) ) and isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_A = {"""image""": image, """question""": question}
else:
_A = image
_A = super().__call__(lowerCAmelCase_ , **lowerCAmelCase_ )
return results
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=False , lowerCAmelCase_=False ) -> Any:
_A = load_image(inputs["""image"""] )
_A = self.tokenizer(
inputs["""question"""] , return_tensors=self.framework , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ )
_A = self.image_processor(images=lowerCAmelCase_ , return_tensors=self.framework )
model_inputs.update(lowerCAmelCase_ )
return model_inputs
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Tuple:
_A = self.model(**lowerCAmelCase_ )
return model_outputs
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=5 ) -> Union[str, Any]:
if top_k > self.model.config.num_labels:
_A = self.model.config.num_labels
if self.framework == "pt":
_A = model_outputs.logits.sigmoid()[0]
_A , _A = probs.topk(lowerCAmelCase_ )
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
_A = scores.tolist()
_A = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(lowerCAmelCase_ , lowerCAmelCase_ )]
| 83 | 0 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Optional[Any]:
debug_launcher(test_script.main )
def UpperCAmelCase ( self ) -> Optional[Any]:
debug_launcher(test_ops.main )
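# Context note: accelerate's debug_launcher spawns the target function in multiple
# CPU processes, so these tests exercise the distributed code paths without GPUs.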
| 713 | import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    # NOTE: the dump elides these dict keys; the standard split names are assumed.
    files = {}
    if train_file is not None:
        files["train"] = [train_file]
    if eval_file is not None:
        files["validation"] = [eval_file]
    if test_file is not None:
        files["test"] = [test_file]
    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}
    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )
    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))
    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))
    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))
    return train_ds, val_ds, test_ds, label2id
_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
@dataclass
class a :
"""simple docstring"""
lowerCamelCase :int = field(metadata={'''help''': '''Which column contains the label'''} )
lowerCamelCase :str = field(default=__lowerCAmelCase , metadata={'''help''': '''The path of the training file'''} )
lowerCamelCase :Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''The path of the development file'''} )
lowerCamelCase :Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''The path of the test file'''} )
lowerCamelCase :int = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
lowerCamelCase :bool = field(
default=__lowerCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class a :
"""simple docstring"""
lowerCamelCase :str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
lowerCamelCase :Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
lowerCamelCase :Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
lowerCamelCase :bool = field(default=__lowerCAmelCase , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
lowerCamelCase :Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def snake_case ( ) -> int:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_A = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
_A , _A , _A = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
""" --overwrite_output_dir to overcome.""")
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.info(
F'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, '''
F'''16-bit training: {training_args.fpaa}''')
logger.info(F'''Training/evaluation parameters {training_args}''')
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_A = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_A , _A , _A , _A = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=snake_case__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
_A = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(snake_case__) , labelaid=snake_case__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="""text-classification""" , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
_A = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(""".bin""" in model_args.model_name_or_path) , config=snake_case__ , cache_dir=model_args.cache_dir , )
def compute_metrics(snake_case__ :EvalPrediction) -> Dict:
_A = np.argmax(p.predictions , axis=1)
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
_A = TFTrainer(
model=snake_case__ , args=snake_case__ , train_dataset=snake_case__ , eval_dataset=snake_case__ , compute_metrics=snake_case__ , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
_A = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""")
_A = trainer.evaluate()
_A = os.path.join(training_args.output_dir , """eval_results.txt""")
with open(snake_case__ , """w""") as writer:
logger.info("""***** Eval results *****""")
for key, value in result.items():
logger.info(F''' {key} = {value}''')
writer.write(F'''{key} = {value}\n''')
results.update(snake_case__)
return results
if __name__ == "__main__":
main()
| 83 | 0 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {'vocab_file': 'vocab.txt'}
_SCREAMING_SNAKE_CASE = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
_SCREAMING_SNAKE_CASE = {
'openbmb/cpm-ant-10b': 1_024,
}
def snake_case ( snake_case__ :List[str]) -> Dict:
_A = collections.OrderedDict()
with open(UpperCAmelCase__ , """r""" , encoding="""utf-8""") as reader:
_A = reader.readlines()
for index, token in enumerate(UpperCAmelCase__):
_A = token.rstrip("""\n""")
_A = index
return vocab
class a ( __lowerCAmelCase ):
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_="<unk>" , lowerCAmelCase_=2_00 ) -> Dict:
_A = vocab
_A = unk_token
_A = max_input_chars_per_word
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Optional[Any]:
_A = list(__UpperCamelCase )
if len(__UpperCamelCase ) > self.max_input_chars_per_word:
return [self.unk_token]
_A = 0
_A = []
while start < len(__UpperCamelCase ):
_A = len(__UpperCamelCase )
_A = None
while start < end:
_A = """""".join(chars[start:end] )
if substr in self.vocab:
_A = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(__UpperCamelCase )
_A = end
return sub_tokens
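# Added sketch: a self-contained re-statement of the greedy longest-match-first
# loop above, so its behaviour can be checked in isolation. The toy vocab is
# hypothetical; the tokenizer class itself is unchanged.
def _greedy_wordpiece(token, vocab, unk="<unk>"):
    pieces, start = [], 0
    while start < len(token):
        end = len(token)
        while end > start and token[start:end] not in vocab:
            end -= 1  # shrink the window until a vocab entry matches
        if end == start:  # nothing matched: emit the unk token, skip one char
            pieces.append(unk)
            start += 1
        else:
            pieces.append(token[start:end])
            start = end
    return pieces

assert _greedy_wordpiece("abc", {"ab", "abc", "c"}) == ["abc"]  # longest match wins
assert _greedy_wordpiece("abd", {"ab", "c"}) == ["ab", "<unk>"]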
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :Dict = VOCAB_FILES_NAMES
lowerCamelCase :str = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase :int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase :str = ['''input_ids''', '''attention_mask''']
lowerCamelCase :str = False
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_="<d>" , lowerCAmelCase_="</d>" , lowerCAmelCase_="<s>" , lowerCAmelCase_="</s>" , lowerCAmelCase_="<pad>" , lowerCAmelCase_="<unk>" , lowerCAmelCase_="</n>" , lowerCAmelCase_="</_>" , lowerCAmelCase_="left" , **lowerCAmelCase_ , ) -> List[str]:
requires_backends(self , ["""jieba"""] )
super().__init__(
bod_token=__UpperCamelCase , eod_token=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , pad_token=__UpperCamelCase , unk_token=__UpperCamelCase , line_token=__UpperCamelCase , space_token=__UpperCamelCase , padding_side=__UpperCamelCase , **__UpperCamelCase , )
_A = bod_token
_A = eod_token
_A = load_vocab(__UpperCamelCase )
_A = self.encoder[space_token]
_A = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
_A = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCAmelCase_ : x[1] ) )
_A = {v: k for k, v in self.encoder.items()}
_A = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def UpperCAmelCase ( self ) -> Dict:
return self.encoder[self.bod_token]
@property
def UpperCAmelCase ( self ) -> str:
return self.encoder[self.eod_token]
@property
def UpperCAmelCase ( self ) -> Tuple:
return self.encoder["\n"]
@property
def UpperCAmelCase ( self ) -> List[Any]:
return len(self.encoder )
def UpperCAmelCase ( self ) -> Any:
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Dict:
_A = []
for x in jieba.cut(__UpperCamelCase , cut_all=__UpperCamelCase ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(__UpperCamelCase ) )
return output_tokens
def UpperCAmelCase ( self , lowerCAmelCase_ , **lowerCAmelCase_ ) -> Union[str, Any]:
_A = [i for i in token_ids if i >= 0]
_A = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(__UpperCamelCase , **__UpperCamelCase )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> int:
return token in self.encoder
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Optional[Any]:
return "".join(__UpperCamelCase )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Any:
return self.encoder.get(__UpperCamelCase , self.encoder.get(self.unk_token ) )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Optional[Any]:
return self.decoder.get(__UpperCamelCase , self.unk_token )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> Dict:
if os.path.isdir(__UpperCamelCase ):
_A = os.path.join(
__UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
else:
_A = (filename_prefix + """-""" if filename_prefix else """""") + save_directory
_A = 0
if " " in self.encoder:
_A = self.encoder[""" """]
del self.encoder[" "]
if "\n" in self.encoder:
_A = self.encoder["""\n"""]
del self.encoder["\n"]
_A = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCAmelCase_ : x[1] ) )
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
F'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
""" Please check that the vocabulary is not corrupted!""" )
_A = token_index
writer.write(token + """\n""" )
index += 1
return (vocab_file,)
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> List[str]:
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = False ) -> Any:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCamelCase , token_ids_a=__UpperCamelCase , already_has_special_tokens=__UpperCamelCase )
if token_ids_a is not None:
return [1] + ([0] * len(__UpperCamelCase )) + [1] + ([0] * len(__UpperCamelCase ))
return [1] + ([0] * len(__UpperCamelCase ))
| 714 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :Union[str, Any] = '''speech_to_text'''
lowerCamelCase :List[str] = ['''past_key_values''']
lowerCamelCase :str = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self , lowerCAmelCase_=1_00_00 , lowerCAmelCase_=12 , lowerCAmelCase_=20_48 , lowerCAmelCase_=4 , lowerCAmelCase_=6 , lowerCAmelCase_=20_48 , lowerCAmelCase_=4 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_="relu" , lowerCAmelCase_=2_56 , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.02 , lowerCAmelCase_=2 , lowerCAmelCase_=True , lowerCAmelCase_=1 , lowerCAmelCase_=0 , lowerCAmelCase_=2 , lowerCAmelCase_=60_00 , lowerCAmelCase_=10_24 , lowerCAmelCase_=2 , lowerCAmelCase_=(5, 5) , lowerCAmelCase_=10_24 , lowerCAmelCase_=80 , lowerCAmelCase_=1 , **lowerCAmelCase_ , ) -> Tuple:
_A = vocab_size
_A = d_model
_A = encoder_ffn_dim
_A = encoder_layers
_A = encoder_attention_heads
_A = decoder_ffn_dim
_A = decoder_layers
_A = decoder_attention_heads
_A = dropout
_A = attention_dropout
_A = activation_dropout
_A = activation_function
_A = init_std
_A = encoder_layerdrop
_A = decoder_layerdrop
_A = use_cache
_A = encoder_layers
_A = scale_embedding # scale factor will be sqrt(d_model) if True
_A = max_source_positions
_A = max_target_positions
_A = num_conv_layers
_A = list(lowerCAmelCase_ )
_A = conv_channels
_A = input_feat_per_channel
_A = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` """
F'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, '''
F'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
super().__init__(
pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , is_encoder_decoder=lowerCAmelCase_ , decoder_start_token_id=lowerCAmelCase_ , **lowerCAmelCase_ , )
| 83 | 0 |
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'):
_SCREAMING_SNAKE_CASE = True
from torch.cuda.amp import autocast
_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
@dataclass
class a :
"""simple docstring"""
lowerCamelCase :str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
lowerCamelCase :Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
lowerCamelCase :Optional[bool] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
lowerCamelCase :Optional[bool] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Whether to log verbose messages or not.'''} , )
lowerCamelCase :Optional[float] = field(
default=2.0 , metadata={'''help''': '''Maximum temperature for gumbel softmax.'''} )
lowerCamelCase :Optional[float] = field(
default=0.5 , metadata={'''help''': '''Minimum temperature for gumbel softmax.'''} )
lowerCamelCase :Optional[float] = field(
default=0.999995 , metadata={'''help''': '''Decay of gumbel temperature during training.'''} )
def snake_case ( snake_case__ :ModelArguments , snake_case__ :TrainingArguments) -> List[str]:
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout)] , )
_A = logging.WARNING
if model_args.verbose_logging:
_A = logging.DEBUG
elif trainer_utils.is_main_process(training_args.local_rank):
_A = logging.INFO
logger.setLevel(_UpperCamelCase)
@dataclass
class a :
"""simple docstring"""
lowerCamelCase :str = field(
default=__lowerCAmelCase , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
lowerCamelCase :Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
lowerCamelCase :Optional[str] = field(
default='''train''' , metadata={
'''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
} , )
lowerCamelCase :Optional[str] = field(
default='''validation''' , metadata={
'''help''': (
'''The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''''
)
} , )
lowerCamelCase :Optional[str] = field(
default='''file''' , metadata={'''help''': '''Column in the dataset that contains speech file path. Defaults to \'file\''''} , )
lowerCamelCase :bool = field(
default=__lowerCAmelCase , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
lowerCamelCase :Optional[int] = field(
default=1 , metadata={
'''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
} , )
lowerCamelCase :Optional[int] = field(
default=__lowerCAmelCase , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
lowerCamelCase :Optional[float] = field(
default=20.0 , metadata={'''help''': '''Filter audio files that are longer than `max_duration_in_seconds` seconds'''} )
@dataclass
class a :
"""simple docstring"""
lowerCamelCase :WavaVecaForPreTraining
lowerCamelCase :WavaVecaFeatureExtractor
lowerCamelCase :Union[bool, str] = "longest"
lowerCamelCase :Optional[int] = None
lowerCamelCase :Optional[int] = None
def __call__( self , lowerCAmelCase_ ) -> int:
_A = self.feature_extractor.pad(
lowerCAmelCase_ , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , )
_A = self.model._get_feat_extract_output_lengths(batch["""input_values"""].shape[-1] )
_A = batch["""input_values"""].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
_A = self.model._get_feat_extract_output_lengths(batch["""attention_mask"""].sum(-1 ) ).to(
torch.long )
_A = torch.zeros(
(batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch["""input_values"""].device )
            # these two operations make sure that all values
            # before the output-length indices are attended to
_A = 1
_A = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
# sample randomly masked indices
_A = _compute_mask_indices(
(batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=lowerCAmelCase_ , min_masks=2 , )
return batch
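# Worked example (added): the flip/cumsum/flip trick used above turns a single
# 1 placed at the last valid output frame into a full prefix attention mask.
import torch as _torch

_m = _torch.zeros(1, 6, dtype=_torch.long)
_m[0, 3] = 1  # last valid frame is index 3
_m = _m.flip([-1]).cumsum(-1).flip([-1]).bool()
assert _m.tolist() == [[True, True, True, True, False, False]]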
class a ( __lowerCAmelCase ):
"""simple docstring"""
def __init__( self , *lowerCAmelCase_ , lowerCAmelCase_=1 , lowerCAmelCase_=0 , lowerCAmelCase_=1.0 , **lowerCAmelCase_ ) -> Dict:
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
_A = 0
_A = max_gumbel_temp
_A = min_gumbel_temp
_A = gumbel_temp_decay
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[int]:
model.train()
_A = self._prepare_inputs(lowerCAmelCase_ )
if self.use_amp:
with autocast():
_A = self.compute_loss(lowerCAmelCase_ , lowerCAmelCase_ )
else:
_A = self.compute_loss(lowerCAmelCase_ , lowerCAmelCase_ )
if self.args.n_gpu > 1 or self.deepspeed:
if model.module.config.ctc_loss_reduction == "mean":
_A = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
_A = loss.sum() / (inputs["""mask_time_indices"""]).sum()
else:
raise ValueError(F'''{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']''' )
if self.args.gradient_accumulation_steps > 1:
_A = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(lowerCAmelCase_ ).backward()
elif self.use_apex:
with amp.scale_loss(lowerCAmelCase_ , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(lowerCAmelCase_ )
else:
loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
return loss.detach()
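# Worked numbers (added, using the dataclass defaults above: max 2.0, min 0.5,
# decay 0.999995): the Gumbel temperature schedule is exponential decay
# clamped at a floor.
_max_t, _min_t, _decay = 2.0, 0.5, 0.999995
assert round(max(_max_t * _decay**100_000, _min_t), 3) == 1.213  # still decaying
assert max(_max_t * _decay**2_000_000, _min_t) == _min_t  # clamped at the floor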
def snake_case ( ) -> Any:
_A = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
_A , _A , _A = parser.parse_args_into_dataclasses()
configure_logger(_UpperCamelCase , _UpperCamelCase)
# Downloading and loading a dataset from the hub.
_A = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir)
if "validation" not in datasets.keys():
# make sure only "validation" and "train" keys remain"
_A = DatasetDict()
_A = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}[:{data_args.validation_split_percentage}%]''' , cache_dir=model_args.cache_dir , )
_A = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}[{data_args.validation_split_percentage}%:]''' , cache_dir=model_args.cache_dir , )
else:
# make sure only "validation" and "train" keys remain"
_A = DatasetDict()
_A = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split="""validation""" , cache_dir=model_args.cache_dir , )
_A = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}''' , cache_dir=model_args.cache_dir , )
# only normalized-inputs-training is supported
_A = WavaVecaFeatureExtractor.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=_UpperCamelCase)
def prepare_dataset(snake_case__ :Optional[Any]):
# check that all files have the correct sampling rate
_A , _A = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate)
return batch
# load audio files into numpy arrays
_A = datasets.map(
_UpperCamelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["""train"""].column_names)
# filter audio files that are too long
_A = vectorized_datasets.filter(
lambda snake_case__: len(data["""speech"""]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate))
def normalize(snake_case__ :Optional[int]):
return feature_extractor(batch["""speech"""] , sampling_rate=feature_extractor.sampling_rate)
# normalize and transform to `BatchFeatures`
_A = vectorized_datasets.map(
_UpperCamelCase , batched=_UpperCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["""train"""].column_names , )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
_A = WavaVecaConfig.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
"""PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"""
""" ``config.feat_extract_norm=\'layer\'""")
_A = WavaVecaForPreTraining(_UpperCamelCase)
_A = DataCollatorForWavaVecaPretraining(model=_UpperCamelCase , feature_extractor=_UpperCamelCase)
_A = WavaVecaPreTrainer(
model=_UpperCamelCase , data_collator=_UpperCamelCase , args=_UpperCamelCase , train_dataset=vectorized_datasets["""train"""] , eval_dataset=vectorized_datasets["""validation"""] , tokenizer=_UpperCamelCase , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
trainer.train()
if __name__ == "__main__":
main()
| 715 | from __future__ import annotations
from collections.abc import Callable
def snake_case ( snake_case__ :Callable[[int | float], int | float] , snake_case__ :int | float , snake_case__ :int | float , snake_case__ :int = 100 , ) -> float:
    _A = x_start
    _A = fnc(x_start)
    _A = 0.0
    for _ in range(steps):
        # Approximates each small segment of the curve as linear and solves
        # for the trapezoidal area
        _A = xa + (x_end - x_start) / steps
        _A = fnc(xa_next)
        area += abs(fxa_next + fxa) * (xa_next - xa) / 2
        # Increment step
        _A = xa_next
        _A = fxa_next
return area
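# Worked check (added; calls the function by its original name, as the
# __main__ block below does): f(x) = x is exactly linear on [0, 1], so even a
# single trapezoid recovers the true area 1/2.
assert abs(trapezoidal_area(lambda x: x, 0, 1, 1) - 0.5) < 1e-12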
if __name__ == "__main__":
def snake_case ( snake_case__ :Tuple) -> List[str]:
return x**3 + x**2
print('f(x) = x^3 + x^2')
print('The area between the curve, x = -5, x = 5 and the x axis is:')
_SCREAMING_SNAKE_CASE = 10
while i <= 100_000:
print(F'''with {i} steps: {trapezoidal_area(f, -5, 5, i)}''')
i *= 10
| 83 | 0 |
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class a ( __UpperCAmelCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Any:
_A = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def UpperCAmelCase ( self ) -> Tuple:
with self.assertRaises(lowerCAmelCase_ ):
_A = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def UpperCAmelCase ( self ) -> Any:
with self.assertRaises(lowerCAmelCase_ ):
_A = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""bool""" ) , type=Value("""int64""" ) ) )
def UpperCAmelCase ( self ) -> str:
_A = pa.array(TypedSequence([1, 2, 3] , type=Value("""int32""" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def UpperCAmelCase ( self ) -> Optional[Any]:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_A = pa.array(TypedSequence(["""foo""", """bar"""] , type=Value("""int64""" ) ) )
def UpperCAmelCase ( self ) -> Optional[Any]:
_A = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""int32""" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def UpperCAmelCase ( self ) -> Optional[int]:
_A = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=Value("""int64""" ) ) )
self.assertEqual(arr.type , pa.string() )
def UpperCAmelCase ( self ) -> List[Any]:
_A = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) )
def UpperCAmelCase ( self ) -> Union[str, Any]:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_A = pa.array(TypedSequence(["""foo""", """bar"""] , type=ArrayaD((1, 3) , """int64""" ) ) )
def UpperCAmelCase ( self ) -> List[str]:
_A = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) )
def UpperCAmelCase ( self ) -> List[str]:
_A = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def UpperCAmelCase ( self ) -> Any:
import PIL.Image
_A = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
"""datasets.arrow_writer.cast_to_python_objects""" , side_effect=lowerCAmelCase_ ) as mock_cast_to_python_objects:
_A = pa.array(TypedSequence([{"""path""": None, """bytes""": B"""image_bytes"""}, pil_image] , type=Image() ) )
_A , _A = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("""optimize_list_casting""" , lowerCAmelCase_ )
self.assertFalse(kwargs["""optimize_list_casting"""] )
def snake_case ( snake_case__ :Any , snake_case__ :Dict) -> int:
_A = pa.BufferReader(snake_case__) if isinstance(snake_case__ , pa.Buffer) else pa.memory_map(snake_case__)
_A = pa.ipc.open_stream(snake_case__)
_A = f.read_all()
assert len(pa_table.to_batches()) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10])
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}])
def snake_case ( snake_case__ :List[str] , snake_case__ :Tuple) -> Optional[Any]:
_A = pa.BufferOutputStream()
_A = pa.schema(snake_case__) if fields else None
with ArrowWriter(stream=snake_case__ , schema=snake_case__ , writer_batch_size=snake_case__) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1})
writer.write({"""col_1""": """bar""", """col_2""": 2})
_A , _A = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_A = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(snake_case__ , metadata=writer._schema.metadata)
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
def snake_case ( ) -> Any:
_A = pa.BufferOutputStream()
_A = Features({"""labels""": ClassLabel(names=["""neg""", """pos"""])})
with ArrowWriter(stream=snake_case__ , features=snake_case__) as writer:
writer.write({"""labels""": 0})
writer.write({"""labels""": 1})
_A , _A = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
_A = pa.BufferReader(output.getvalue())
_A = pa.ipc.open_stream(snake_case__)
_A = f.read_all()
_A = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(snake_case__)
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10])
def snake_case ( snake_case__ :Union[str, Any]) -> Tuple:
_A = pa.BufferOutputStream()
with ArrowWriter(
stream=snake_case__ , writer_batch_size=snake_case__ , hash_salt="""split_name""" , check_duplicates=snake_case__ , ) as writer:
with pytest.raises(snake_case__):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=[1, 2])
_A , _A = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 10])
def snake_case ( snake_case__ :Tuple) -> List[Any]:
_A = pa.BufferOutputStream()
with ArrowWriter(
stream=snake_case__ , writer_batch_size=snake_case__ , hash_salt="""split_name""" , check_duplicates=snake_case__ , ) as writer:
with pytest.raises(snake_case__):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=10)
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=10)
_A , _A = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 10])
def snake_case ( snake_case__ :int) -> Optional[Any]:
_A = pa.BufferOutputStream()
with ArrowWriter(
stream=snake_case__ , writer_batch_size=snake_case__ , hash_salt="""split_name""" , check_duplicates=snake_case__ , ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1)
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=2)
_A , _A = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10])
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}])
def snake_case ( snake_case__ :Tuple , snake_case__ :Dict) -> Dict:
_A = pa.BufferOutputStream()
_A = pa.schema(snake_case__) if fields else None
with ArrowWriter(stream=snake_case__ , schema=snake_case__ , writer_batch_size=snake_case__) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]})
writer.write_batch({"""col_1""": [], """col_2""": []})
_A , _A = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_A = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(snake_case__ , metadata=writer._schema.metadata)
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10])
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}])
def snake_case ( snake_case__ :List[Any] , snake_case__ :Optional[int]) -> Any:
_A = pa.BufferOutputStream()
_A = pa.schema(snake_case__) if fields else None
with ArrowWriter(stream=snake_case__ , schema=snake_case__ , writer_batch_size=snake_case__) as writer:
writer.write_table(pa.Table.from_pydict({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]}))
_A , _A = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_A = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(snake_case__ , metadata=writer._schema.metadata)
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10])
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}])
def snake_case ( snake_case__ :Union[str, Any] , snake_case__ :Union[str, Any]) -> Tuple:
_A = pa.BufferOutputStream()
_A = pa.schema(snake_case__) if fields else None
with ArrowWriter(stream=snake_case__ , schema=snake_case__ , writer_batch_size=snake_case__) as writer:
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""foo"""], """col_2""": [1]}))
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""bar"""], """col_2""": [2]}))
_A , _A = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_A = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(snake_case__ , metadata=writer._schema.metadata)
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
def snake_case ( ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmp_dir:
_A = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
_A = os.path.join(snake_case__ , """test.arrow""")
with ArrowWriter(path=snake_case__ , schema=pa.schema(snake_case__)) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]})
_A , _A = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(snake_case__ , metadata=writer._schema.metadata)
_check_output(snake_case__ , 1)
def snake_case ( snake_case__ :Optional[Any]) -> int:
if pa.types.is_list(snake_case__):
return get_base_dtype(arr_type.value_type)
else:
return arr_type
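# Worked example (added): nested list types are unwrapped down to the leaf
# primitive type, e.g. list<list<int64>> -> int64.
assert get_base_dtype(pa.list_(pa.list_(pa.int64()))) == pa.int64()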
def snake_case ( snake_case__ :Dict , snake_case__ :Optional[Any]) -> List[Any]:
if isinstance(lst[0] , snake_case__):
change_first_primitive_element_in_list(lst[0] , snake_case__)
else:
_A = value
@pytest.mark.parametrize("""optimized_int_type, expected_dtype""" , [(None, pa.intaa()), (Value("""int32"""), pa.intaa())])
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def snake_case ( snake_case__ :List[str] , snake_case__ :Any , snake_case__ :str) -> int:
_A = pa.array(TypedSequence(snake_case__ , optimized_int_type=snake_case__))
assert get_base_dtype(arr.type) == expected_dtype
@pytest.mark.parametrize(
"""col, expected_dtype""" , [
("""attention_mask""", pa.inta()),
("""special_tokens_mask""", pa.inta()),
("""token_type_ids""", pa.inta()),
("""input_ids""", pa.intaa()),
("""other""", pa.intaa()),
] , )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def snake_case ( snake_case__ :Optional[Any] , snake_case__ :List[str] , snake_case__ :List[str]) -> Any:
# in range
_A = pa.array(OptimizedTypedSequence(snake_case__ , col=snake_case__))
assert get_base_dtype(arr.type) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
_A = copy.deepcopy(snake_case__)
_A = np.iinfo(expected_dtype.to_pandas_dtype()).max + 1
change_first_primitive_element_in_list(snake_case__ , snake_case__)
_A = pa.array(OptimizedTypedSequence(snake_case__ , col=snake_case__))
assert get_base_dtype(arr.type) == pa.intaa()
@pytest.mark.parametrize("""raise_exception""" , [False, True])
def snake_case ( snake_case__ :List[Any] , snake_case__ :Dict) -> str:
_A = str(tmp_path / """dataset-train.arrow""")
try:
with ArrowWriter(path=snake_case__) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def snake_case ( snake_case__ :Any) -> str:
_A = """mock://dataset-train.arrow"""
with ArrowWriter(path=snake_case__ , storage_options=mockfs.storage_options) as writer:
assert isinstance(writer._fs , type(snake_case__))
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"""col_1""": """foo""", """col_2""": 1})
writer.write({"""col_1""": """bar""", """col_2""": 2})
_A , _A = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(snake_case__)
def snake_case ( ) -> Union[str, Any]:
_A = pa.BufferOutputStream()
with ParquetWriter(stream=snake_case__) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1})
writer.write({"""col_1""": """bar""", """col_2""": 2})
_A , _A = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_A = pa.BufferReader(output.getvalue())
_A = pq.read_table(snake_case__)
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("""embed_local_files""" , [False, True])
def snake_case ( snake_case__ :Optional[Any] , snake_case__ :Optional[int]) -> Union[str, Any]:
import PIL.Image
_A = str(tmp_path / """test_image_rgb.jpg""")
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta)).save(snake_case__ , format="""png""")
_A = pa.BufferOutputStream()
with ParquetWriter(
stream=snake_case__ , features=Features({"""image""": Image()}) , embed_local_files=snake_case__) as writer:
writer.write({"""image""": image_path})
writer.finalize()
_A = pa.BufferReader(output.getvalue())
_A = pq.read_table(snake_case__)
_A = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["""image"""][0]["""path"""] , snake_case__)
with open(snake_case__ , """rb""") as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def snake_case ( ) -> Optional[Any]:
_A = pa.schema([pa.field("""col_1""" , pa.string() , nullable=snake_case__)])
_A = pa.BufferOutputStream()
with ArrowWriter(stream=snake_case__) as writer:
writer._build_writer(inferred_schema=snake_case__)
assert writer._schema == pa.schema([pa.field("""col_1""" , pa.string())])
| 716 | import numpy as np
import qiskit
def snake_case ( snake_case__ :int = 8 , snake_case__ :int | None = None) -> str:
_A = np.random.default_rng(seed=snake_case__)
# Roughly 25% of the qubits will contribute to the key.
# So we take more than we need.
_A = 6 * key_len
# Measurement basis for Alice's qubits.
_A = rng.integers(2 , size=snake_case__)
# The set of states Alice will prepare.
_A = rng.integers(2 , size=snake_case__)
# Measurement basis for Bob's qubits.
_A = rng.integers(2 , size=snake_case__)
# Quantum Circuit to simulate BB84
_A = qiskit.QuantumCircuit(snake_case__ , name="""BB84""")
# Alice prepares her qubits according to rules above.
for index, _ in enumerate(snake_case__):
if alice_state[index] == 1:
bbaa_circ.x(snake_case__)
if alice_basis[index] == 1:
bbaa_circ.h(snake_case__)
bbaa_circ.barrier()
# Bob measures the received qubits according to rules above.
for index, _ in enumerate(snake_case__):
if bob_basis[index] == 1:
bbaa_circ.h(snake_case__)
bbaa_circ.barrier()
bbaa_circ.measure_all()
# Simulate the quantum circuit.
_A = qiskit.Aer.get_backend("""aer_simulator""")
# We only need to run one shot because the key is unique.
# Multiple shots will produce the same key.
_A = qiskit.execute(snake_case__ , snake_case__ , shots=1 , seed_simulator=snake_case__)
# Returns the result of measurement.
_A = job.result().get_counts(snake_case__).most_frequent()
# Extracting the generated key from the simulation results.
# Only keep measurement results where Alice and Bob chose the same basis.
_A = """""".join(
[
result_bit
for alice_basis_bit, bob_basis_bit, result_bit in zip(
snake_case__ , snake_case__ , snake_case__)
if alice_basis_bit == bob_basis_bit
])
# Get final key. Pad with 0 if too short, otherwise truncate.
_A = gen_key[:key_len] if len(snake_case__) >= key_len else gen_key.ljust(snake_case__ , """0""")
return key
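# Worked example (added, toy values): the sifting step above keeps only the
# positions where Alice's and Bob's measurement bases agree.
_alice_b, _bob_b, _bits = [0, 1, 1, 0], [0, 0, 1, 1], "1011"
assert "".join(r for a, b, r in zip(_alice_b, _bob_b, _bits) if a == b) == "11"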
if __name__ == "__main__":
print(F'''The generated key is : {bbaa(8, seed=0)}''')
from doctest import testmod
testmod()
| 83 | 0 |
'''simple docstring'''
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory-expensive.
_SCREAMING_SNAKE_CASE = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
_SCREAMING_SNAKE_CASE = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
_SCREAMING_SNAKE_CASE = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_000))
def snake_case ( snake_case__ :str , snake_case__ :str) -> List[Any]:
_A = len([g for position, g in enumerate(snake_case__) if g == main_target[position]])
return (item, float(snake_case__))
def snake_case ( snake_case__ :str , snake_case__ :str) -> Union[str, Any]:
_A = random.randint(0 , len(snake_case__) - 1)
    _A = parent_a[:random_slice] + parent_b[random_slice:]
    _A = parent_b[:random_slice] + parent_a[random_slice:]
    return (child_a, child_b)
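# Worked example (added, with a fixed slice index instead of the random one
# above): each child takes one parent's head and the other parent's tail.
_s = 2
assert ("AAAA"[:_s] + "BBBB"[_s:], "BBBB"[:_s] + "AAAA"[_s:]) == ("AABB", "BBAA")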
def snake_case ( snake_case__ :str , snake_case__ :list[str]) -> Tuple:
_A = list(snake_case__)
if random.uniform(0 , 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0 , len(child_list)) - 1] = random.choice(snake_case__)
    return "".join(child_list)
def snake_case ( snake_case__ :tuple[str, float] , snake_case__ :list[tuple[str, float]] , snake_case__ :list[str] , ) -> Optional[int]:
_A = []
# Generate more children proportionally to the fitness score.
_A = int(parent_a[1] * 100) + 1
_A = 10 if child_n >= 10 else child_n
for _ in range(snake_case__):
_A = population_score[random.randint(0 , snake_case__)][0]
        _A , _A = crossover(parent_a[0] , snake_case__)
        # Append the new mutated strings to the population list.
        pop.append(mutate(child_a , snake_case__))
        pop.append(mutate(child_b , snake_case__))
return pop
def snake_case ( snake_case__ :str , snake_case__ :list[str] , snake_case__ :bool = True) -> Dict:
if N_POPULATION < N_SELECTED:
_A = F'''{N_POPULATION} must be bigger than {N_SELECTED}'''
raise ValueError(snake_case__)
    # Verify that the target contains no genes besides the ones inside the genes variable.
_A = sorted({c for c in target if c not in genes})
if not_in_genes_list:
_A = F'''{not_in_genes_list} is not in genes list, evolution cannot converge'''
raise ValueError(snake_case__)
# Generate random starting population.
_A = []
for _ in range(snake_case__):
population.append("""""".join([random.choice(snake_case__) for i in range(len(snake_case__))]))
# Just some logs to know what the algorithms is doing.
    _A , _A = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(snake_case__)
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
_A = [evaluate(snake_case__ , snake_case__) for item in population]
# Check if there is a matching evolution.
_A = sorted(snake_case__ , key=lambda snake_case__: x[1] , reverse=snake_case__)
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'''\nGeneration: {generation}'''
F'''\nTotal Population:{total_population}'''
F'''\nBest score: {population_score[0][1]}'''
F'''\nBest string: {population_score[0][0]}''')
# Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
_A = population[: int(N_POPULATION / 3)]
population.clear()
population.extend(snake_case__)
# Normalize population score to be between 0 and 1.
_A = [
(item, score / len(snake_case__)) for item, score in population_score
]
# This is selection
for i in range(snake_case__):
population.extend(select(population_score[int(snake_case__)] , snake_case__ , snake_case__))
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
        # far fewer generations.
if len(snake_case__) > N_POPULATION:
break
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
_SCREAMING_SNAKE_CASE = list(
' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'
'nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'
)
_SCREAMING_SNAKE_CASE = basic(target_str, genes_list)
print(
F'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
| 717 | import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def snake_case ( snake_case__ :int) -> Optional[int]:
return EnvironmentCommand()
def snake_case ( snake_case__ :Tuple) -> List[str]:
return EnvironmentCommand(args.accelerate_config_file)
class a ( __lowerCAmelCase ):
"""simple docstring"""
@staticmethod
def UpperCAmelCase ( lowerCAmelCase_ ) -> Tuple:
_A = parser.add_parser("""env""" )
download_parser.set_defaults(func=lowerCAmelCase_ )
download_parser.add_argument(
"""--accelerate-config_file""" , default=lowerCAmelCase_ , help="""The accelerate config file to use for the default values in the launching script.""" , )
download_parser.set_defaults(func=lowerCAmelCase_ )
def __init__( self , lowerCAmelCase_ , *lowerCAmelCase_ ) -> None:
_A = accelerate_config_file
def UpperCAmelCase ( self ) -> Dict:
_A = """not installed"""
if is_safetensors_available():
import safetensors
_A = safetensors.__version__
elif importlib.util.find_spec("""safetensors""" ) is not None:
import safetensors
_A = F'''{safetensors.__version__} but is ignored because of PyTorch version too old.'''
_A = """not installed"""
_A = _A = """not found"""
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
_A = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(lowerCAmelCase_ ):
_A = load_config_from_file(self._accelerate_config_file ).to_dict()
_A = (
"""\n""".join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
else F'''\t{accelerate_config}'''
)
_A = """not installed"""
_A = """NA"""
if is_torch_available():
import torch
_A = torch.__version__
_A = torch.cuda.is_available()
_A = """not installed"""
_A = """NA"""
if is_tf_available():
import tensorflow as tf
_A = tf.__version__
try:
# deprecated in v2.1
_A = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
_A = bool(tf.config.list_physical_devices("""GPU""" ) )
_A = """not installed"""
_A = """not installed"""
_A = """not installed"""
_A = """NA"""
if is_flax_available():
import flax
import jax
import jaxlib
_A = flax.__version__
_A = jax.__version__
_A = jaxlib.__version__
_A = jax.lib.xla_bridge.get_backend().platform
_A = {
"""`transformers` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""Huggingface_hub version""": huggingface_hub.__version__,
"""Safetensors version""": F'''{safetensors_version}''',
"""Accelerate version""": F'''{accelerate_version}''',
"""Accelerate config""": F'''{accelerate_config_str}''',
"""PyTorch version (GPU?)""": F'''{pt_version} ({pt_cuda_available})''',
"""Tensorflow version (GPU?)""": F'''{tf_version} ({tf_cuda_available})''',
"""Flax version (CPU?/GPU?/TPU?)""": F'''{flax_version} ({jax_backend})''',
"""Jax version""": F'''{jax_version}''',
"""JaxLib version""": F'''{jaxlib_version}''',
"""Using GPU in script?""": """<fill in>""",
"""Using distributed or parallel set-up in script?""": """<fill in>""",
}
print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" )
print(self.format_dict(lowerCAmelCase_ ) )
return info
@staticmethod
def UpperCAmelCase ( lowerCAmelCase_ ) -> Tuple:
return "\n".join([F'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
| 83 | 0 |
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
_SCREAMING_SNAKE_CASE = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
_SCREAMING_SNAKE_CASE = requests.get(url, headers={'UserAgent': UserAgent().random})
# res.raise_for_status()
    with open('project1a.html', 'wb') as out_file:  # saved only to inspect the page's CSS classes
for data in res.iter_content(10_000):
out_file.write(data)
_SCREAMING_SNAKE_CASE = BeautifulSoup(res.text, 'html.parser')
_SCREAMING_SNAKE_CASE = list(soup.select('.eZt8xd'))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('href'))
else:
webbrowser.open(F'''https://google.com{link.get("href")}''')
| 718 | import colorsys
from PIL import Image # type: ignore
def snake_case ( snake_case__ :float , snake_case__ :float , snake_case__ :int) -> float:
_A = x
_A = y
for step in range(snake_case__): # noqa: B007
_A = a * a - b * b + x
_A = 2 * a * b + y
_A = a_new
        # divergence happens for every complex number whose squared absolute
        # value a * a + b * b exceeds 4 (i.e. |z| > 2)
if a * a + b * b > 4:
break
return step / (max_step - 1)
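# Worked checks (added; calls the function by the original name referenced
# further below): the origin never escapes and gets the maximum normalised
# distance 1.0, while a point far outside the set escapes at step 0.
assert get_distance(0, 0, 50) == 1.0
assert get_distance(2, 2, 50) == 0.0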
def snake_case ( snake_case__ :float) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return (255, 255, 255)
def snake_case ( snake_case__ :float) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(snake_case__ , 1 , 1))
def snake_case ( snake_case__ :int = 800 , snake_case__ :int = 600 , snake_case__ :float = -0.6 , snake_case__ :float = 0 , snake_case__ :float = 3.2 , snake_case__ :int = 50 , snake_case__ :bool = True , ) -> Image.Image:
_A = Image.new("""RGB""" , (image_width, image_height))
_A = img.load()
# loop through the image-coordinates
for image_x in range(snake_case__):
for image_y in range(snake_case__):
# determine the figure-coordinates based on the image-coordinates
_A = figure_width / image_width * image_height
_A = figure_center_x + (image_x / image_width - 0.5) * figure_width
_A = figure_center_y + (image_y / image_height - 0.5) * figure_height
_A = get_distance(snake_case__ , snake_case__ , snake_case__)
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
_A = get_color_coded_rgb(snake_case__)
else:
_A = get_black_and_white_rgb(snake_case__)
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
_SCREAMING_SNAKE_CASE = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 83 | 0 |
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class a :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=7 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=99 , lowerCAmelCase_=64 , lowerCAmelCase_=32 , lowerCAmelCase_=5 , lowerCAmelCase_=4 , lowerCAmelCase_=37 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=5_12 , lowerCAmelCase_=16 , lowerCAmelCase_=2 , lowerCAmelCase_=0.02 , lowerCAmelCase_=3 , lowerCAmelCase_=4 , lowerCAmelCase_=None , ) -> List[Any]:
_A = parent
_A = batch_size
_A = seq_length
_A = is_training
_A = use_input_mask
_A = use_token_type_ids
_A = use_labels
_A = vocab_size
_A = hidden_size
_A = embedding_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = type_vocab_size
_A = type_sequence_label_size
_A = initializer_range
_A = num_labels
_A = num_choices
_A = scope
def UpperCAmelCase ( self ) -> List[str]:
_A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A = None
if self.use_input_mask:
_A = random_attention_mask([self.batch_size, self.seq_length] )
_A = None
if self.use_token_type_ids:
_A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_A = None
_A = None
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_A = ids_tensor([self.batch_size] , self.num_choices )
_A = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase ( self ) -> Optional[int]:
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[int]:
_A = MegatronBertModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
_A = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
_A = model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple:
_A = MegatronBertForMaskedLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[Any]:
_A = MegatronBertForCausalLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def create_and_check_megatron_bert_for_next_sequence_prediction( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
model = MegatronBertForNextSentencePrediction(config=config )
model.to(torch_device )
model.eval()
result = model(
input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def create_and_check_megatron_bert_for_pretraining( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
model = MegatronBertForPreTraining(config=config )
model.to(torch_device )
model.eval()
result = model(
input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , next_sentence_label=sequence_labels , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def create_and_check_megatron_bert_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
model = MegatronBertForQuestionAnswering(config=config )
model.to(torch_device )
model.eval()
result = model(
input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def create_and_check_megatron_bert_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
config.num_labels = self.num_labels
model = MegatronBertForSequenceClassification(config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def create_and_check_megatron_bert_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
config.num_labels = self.num_labels
model = MegatronBertForTokenClassification(config=config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def create_and_check_megatron_bert_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
config.num_choices = self.num_choices
model = MegatronBertForMultipleChoice(config=config )
model.to(torch_device )
model.eval()
multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
result = model(
multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def prepare_config_and_inputs_for_common( self ):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class a ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
all_model_classes = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
'''feature-extraction''': MegatronBertModel,
'''fill-mask''': MegatronBertForMaskedLM,
'''question-answering''': MegatronBertForQuestionAnswering,
'''text-classification''': MegatronBertForSequenceClassification,
'''text-generation''': MegatronBertForCausalLM,
'''token-classification''': MegatronBertForTokenClassification,
'''zero-shot''': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
fx_compatible = True
# test_resize_embeddings = False
test_head_masking = False
def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
if return_labels:
if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
inputs_dict["""labels"""] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
inputs_dict["""next_sentence_label"""] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=torch_device )
return inputs_dict
def setUp( self ):
self.model_tester = MegatronBertModelTester(self )
self.config_tester = ConfigTester(self , config_class=MegatronBertConfig , hidden_size=37 )
def test_config( self ):
self.config_tester.run_common_tests()
def test_megatron_bert_model( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs )
def test_for_masked_lm( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs )
def test_for_multiple_choice( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs )
def test_for_next_sequence_prediction( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs )
def test_for_pretraining( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs )
def test_for_question_answering( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs )
def test_for_sequence_classification( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs )
def test_for_token_classification( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs )
def _long_tensor( tok_lst):
return torch.tensor(
tok_lst , dtype=torch.long , device=torch_device , )
TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class a ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip("""Model is not available.""" )
def test_inference_no_head( self ):
directory = """nvidia/megatron-bert-uncased-345m"""
if "MYDIR" in os.environ:
directory = os.path.join(os.environ["""MYDIR"""] , directory )
model = MegatronBertModel.from_pretrained(directory )
model.to(torch_device )
model.half()
input_ids = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] )
with torch.no_grad():
output = model(input_ids )[0]
expected_shape = torch.Size((1, 9, 10_24) )
self.assertEqual(output.shape , expected_shape )
expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
for ii in range(3 ):
for jj in range(3 ):
a = output[0, ii, jj]
b = expected[3 * ii + jj]
msg = """ii={} jj={} a={} b={}""".format(ii , jj , a , b )
self.assertTrue(math.isclose(a , b , rel_tol=TOLERANCE , abs_tol=TOLERANCE ) , msg=msg )
| 719 | import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n'
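_CITATION = _SCREAMING_SNAKE_CASE # bind the citation string to its own name before `_SCREAMING_SNAKE_CASE` is reassigned below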
_SCREAMING_SNAKE_CASE = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n'
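_DESCRIPTION = _SCREAMING_SNAKE_CASE # bind the description string before the next reassignment overwrites it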
_SCREAMING_SNAKE_CASE = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n'
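_KWARGS_DESCRIPTION = _SCREAMING_SNAKE_CASE # bind the usage string; the metric class below references all three names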
def get_coref_infos( key_lines , sys_lines , NP_only=False , remove_nested=False , keep_singletons=True , min_span=False , doc="dummy_doc"):
key_doc_lines = {doc: key_lines}
sys_doc_lines = {doc: sys_lines}
doc_coref_infos = {}
key_nested_coref_num = 0
sys_nested_coref_num = 0
key_removed_nested_clusters = 0
sys_removed_nested_clusters = 0
key_singletons_num = 0
sys_singletons_num = 0
key_clusters , singletons_num = reader.get_doc_mentions(doc , key_doc_lines[doc] , keep_singletons)
key_singletons_num += singletons_num
if NP_only or min_span:
key_clusters = reader.set_annotated_parse_trees(key_clusters , key_doc_lines[doc] , NP_only , min_span)
sys_clusters , singletons_num = reader.get_doc_mentions(doc , sys_doc_lines[doc] , keep_singletons)
sys_singletons_num += singletons_num
if NP_only or min_span:
sys_clusters = reader.set_annotated_parse_trees(sys_clusters , key_doc_lines[doc] , NP_only , min_span)
if remove_nested:
nested_mentions , removed_clusters = reader.remove_nested_coref_mentions(key_clusters , keep_singletons)
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
nested_mentions , removed_clusters = reader.remove_nested_coref_mentions(sys_clusters , keep_singletons)
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters , key_clusters)
key_mention_sys_cluster = reader.get_mention_assignments(key_clusters , sys_clusters)
doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"""Number of removed nested coreferring mentions in the key """
F'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''')
logger.info(
"""Number of resulting singleton clusters in the key """
F'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''')
if not keep_singletons:
logger.info(
F'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '''
"""files, respectively""")
return doc_coref_infos
def evaluate( key_lines , sys_lines , metrics , NP_only , remove_nested , keep_singletons , min_span):
doc_coref_infos = get_coref_infos(key_lines , sys_lines , NP_only , remove_nested , keep_singletons , min_span)
output_scores = {}
conll = 0
conll_subparts_num = 0
for name, metric in metrics:
recall , precision , fa = evaluator.evaluate_documents(doc_coref_infos , metric , beta=1)
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F'''{name}/recall''': recall, F'''{name}/precision''': precision, F'''{name}/f1''': fa})
logger.info(
name.ljust(10) , F'''Recall: {recall * 100:.2f}''' , F''' Precision: {precision * 100:.2f}''' , F''' F1: {fa * 100:.2f}''' , )
if conll_subparts_num == 3:
conll = (conll / 3) * 100
logger.info(F'''CoNLL score: {conll:.2f}''')
output_scores.update({"""conll_score""": conll})
return output_scores
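# Note: the conll_score emitted above is the unweighted mean of the MUC, B-cubed and CEAFe
# F1 values (times 100), which is why it is only computed once all three subparts are in.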
def check_gold_parse_annotation( key_lines):
has_gold_parse = False
for line in key_lines:
if not line.startswith("""#"""):
if len(line.split()) > 6:
parse_col = line.split()[5]
if not parse_col == "-":
has_gold_parse = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
"""simple docstring"""
def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Sequence(datasets.Value("""string""" ) ),
} ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[
"""https://github.com/ns-moosavi/coval""",
"""https://www.aclweb.org/anthology/P16-1060""",
"""http://www.conll.cemantix.org/2012/data.html""",
] , )
def _compute( self , predictions , references , keep_singletons=True , NP_only=False , min_span=False , remove_nested=False ):
metrics = [
("""mentions""", evaluator.mentions),
("""muc""", evaluator.muc),
("""bcub""", evaluator.b_cubed),
("""ceafe""", evaluator.ceafe),
("""lea""", evaluator.lea),
]
if min_span:
has_gold_parse = util.check_gold_parse_annotation(references )
if not has_gold_parse:
raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
score = evaluate(
key_lines=references , sys_lines=predictions , metrics=metrics , NP_only=NP_only , remove_nested=remove_nested , keep_singletons=keep_singletons , min_span=min_span , )
return score
| 83 | 0 |
from __future__ import annotations
class Matrix:
"""simple docstring"""
def __init__( self , rows ) -> None:
error = TypeError(
"""Matrices must be formed from a list of zero or more lists containing at """
"""least one and the same number of values, each of which must be of type """
"""int or float.""" )
if len(rows ) != 0:
cols = len(rows[0] )
if cols == 0:
raise error
for row in rows:
if len(row ) != cols:
raise error
for value in row:
if not isinstance(value , (int, float) ):
raise error
self.rows = rows
else:
self.rows = []
def columns( self ) -> list[list[int]]:
return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
@property
def num_rows( self ) -> int:
return len(self.rows )
@property
def num_columns( self ) -> int:
return len(self.rows[0] )
@property
def order( self ) -> tuple[int, int]:
return (self.num_rows, self.num_columns)
@property
def is_square( self ) -> bool:
return self.order[0] == self.order[1]
def identity( self ) -> Matrix:
values = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
for row_num in range(self.num_rows )
]
return Matrix(values )
def determinant( self ) -> int:
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
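# Larger matrices fall back to Laplace (cofactor) expansion along the first row:
# det(A) = sum_j a[0][j] * C[0][j], where C[0][j] is the cofactor of entry (0, j).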
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
def is_invertable( self ) -> bool:
return bool(self.determinant() )
def get_minor( self , row , column ) -> int:
values = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns )
if other_column != column
]
for other_row in range(self.num_rows )
if other_row != row
]
return Matrix(values ).determinant()
def get_cofactor( self , row , column ) -> int:
if (row + column) % 2 == 0:
return self.get_minor(row , column )
return -1 * self.get_minor(row , column )
def minors( self ) -> Matrix:
return Matrix(
[
[self.get_minor(row , column ) for column in range(self.num_columns )]
for row in range(self.num_rows )
] )
def cofactors( self ) -> Matrix:
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
def adjugate( self ) -> Matrix:
values = [
[self.cofactors().rows[column][row] for column in range(self.num_columns )]
for row in range(self.num_rows )
]
return Matrix(values )
def inverse( self ) -> Matrix:
determinant = self.determinant()
if not determinant:
raise TypeError("""Only matrices with a non-zero determinant have an inverse""" )
return self.adjugate() * (1 / determinant)
def __repr__( self ) -> str:
return str(self.rows )
def __str__( self ) -> str:
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
"""[""" + """. """.join([str(lowerCAmelCase_ ) for value in row] ) + """.]"""
for row in self.rows
] )
+ "]"
)
def add_row( self , row , position = None ) -> None:
type_error = TypeError("""Row must be a list containing all ints and/or floats""" )
if not isinstance(row , list ):
raise type_error
for value in row:
if not isinstance(value , (int, float) ):
raise type_error
if len(row ) != self.num_columns:
raise ValueError(
"""Row must be equal in length to the other rows in the matrix""" )
if position is None:
self.rows.append(row )
else:
self.rows = self.rows[0:position] + [row] + self.rows[position:]
def add_column( self , column , position = None ) -> None:
type_error = TypeError(
"""Column must be a list containing all ints and/or floats""" )
if not isinstance(column , list ):
raise type_error
for value in column:
if not isinstance(value , (int, float) ):
raise type_error
if len(column ) != self.num_rows:
raise ValueError(
"""Column must be equal in length to the other columns in the matrix""" )
if position is None:
self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
else:
self.rows = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows )
]
def __eq__( self , other ) -> bool:
if not isinstance(other , Matrix ):
return NotImplemented
return self.rows == other.rows
def __ne__( self , other ) -> bool:
return not self == other
def __neg__( self ) -> Matrix:
return self * -1
def __add__( self , other ) -> Matrix:
if self.order != other.order:
raise ValueError("""Addition requires matrices of the same order""" )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __sub__( self , other ) -> Matrix:
if self.order != other.order:
raise ValueError("""Subtraction requires matrices of the same order""" )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __mul__( self , other ) -> Matrix:
if isinstance(other , (int, float) ):
return Matrix(
[[int(element * other ) for element in row] for row in self.rows] )
elif isinstance(other , Matrix ):
if self.num_columns != other.num_rows:
raise ValueError(
"""The number of columns in the first matrix must """
"""be equal to the number of rows in the second""" )
return Matrix(
[
[Matrix.dot_product(row , column ) for column in other.columns()]
for row in self.rows
] )
else:
raise TypeError(
"""A Matrix can only be multiplied by an int, float, or another matrix""" )
def __pow__( self , other ) -> Matrix:
if not isinstance(other , int ):
raise TypeError("""A Matrix can only be raised to the power of an int""" )
if not self.is_square:
raise ValueError("""Only square matrices can be raised to a power""" )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
"""Only invertable matrices can be raised to a negative power""" )
result = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
def dot_product( cls , row , column ) -> int:
return sum(row[i] * column[i] for i in range(len(row ) ) )
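# For example, Matrix.dot_product([1, 2, 3], [4, 5, 6]) evaluates to 1*4 + 2*5 + 3*6 == 32.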
if __name__ == "__main__":
import doctest
doctest.testmod()
| 720 | import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'facebook/blenderbot_small-90M': 512}
def get_pairs( word):
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
pairs = set(pairs)
return pairs
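# For example, get_pairs(("h", "i", "!</w>")) returns {("h", "i"), ("i", "!</w>")} - the set of
# adjacent symbol bigrams that the BPE loop in `bpe` below repeatedly ranks and merges.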
class a ( PreTrainedTokenizer ):
"""simple docstring"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ['''input_ids''', '''attention_mask''']
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="__start__" , lowerCAmelCase_="__end__" , lowerCAmelCase_="__unk__" , lowerCAmelCase_="__null__" , **lowerCAmelCase_ , ) -> int:
super().__init__(unk_token=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , **lowerCAmelCase_ )
with open(lowerCAmelCase_ , encoding="""utf-8""" ) as vocab_handle:
_A = json.load(lowerCAmelCase_ )
_A = {v: k for k, v in self.encoder.items()}
with open(lowerCAmelCase_ , encoding="""utf-8""" ) as merges_handle:
_A = merges_handle.read().split("""\n""" )[1:-1]
_A = [tuple(merge.split() ) for merge in merges]
_A = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
_A = {}
@property
def vocab_size( self ) -> int:
return len(self.encoder )
def get_vocab( self ) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder )
def bpe( self , token ) -> str:
if token in self.cache:
return self.cache[token]
token = re.sub("""([.,!?()])""" , r""" \1""" , token )
token = re.sub("""(')""" , r""" \1 """ , token )
token = re.sub(r"""\s{2,}""" , """ """ , token )
if "\n" in token:
token = token.replace("""\n""" , """ __newln__""" )
tokens = token.split(""" """ )
words = []
for token in tokens:
if not len(token ):
continue
token = token.lower()
word = tuple(token )
word = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
pairs = get_pairs(word )
if not pairs:
words.append(token )
continue
while True:
# repeatedly merge the best-ranked adjacent pair until no known merge remains
bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
first , second = bigram
new_word = []
i = 0
while i < len(word ):
try:
j = word.index(first , i )
new_word.extend(word[i:j] )
i = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
new_word = tuple(new_word )
word = new_word
if len(word ) == 1:
break
else:
pairs = get_pairs(word )
word = """@@ """.join(word )
word = word[:-4]
self.cache[token] = word
words.append(word )
return " ".join(words )
def _tokenize( self , text ) -> List[str]:
split_tokens = []
words = re.findall(r"""\S+\n?""" , text )
for token in words:
split_tokens.extend(list(self.bpe(token ).split(""" """ ) ) )
return split_tokens
def _convert_token_to_id( self , token ) -> int:
token = token.lower()
return self.encoder.get(token , self.encoder.get(self.unk_token ) )
def _convert_id_to_token( self , index ) -> str:
return self.decoder.get(index , self.unk_token )
def convert_tokens_to_string( self , tokens ) -> str:
out_string = """ """.join(tokens ).replace("""@@ """ , """""" ).strip()
return out_string
def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
if not os.path.isdir(save_directory ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
vocab_file = os.path.join(
save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
merge_file = os.path.join(
save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(vocab_file , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + """\n""" )
index = 0
with open(merge_file , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
""" Please check that the tokenizer is not corrupted!""" )
index = token_index
writer.write(""" """.join(bpe_tokens ) + """\n""" )
index += 1
return vocab_file, merge_file
| 83 | 0 |
import numpy as np
from PIL import Image
def maxpooling( arr , size , stride):
arr = np.array(arr)
if arr.shape[0] != arr.shape[1]:
raise ValueError("""The input array is not a square matrix""")
i = 0
j = 0
mat_i = 0
mat_j = 0
# compute the shape of the output matrix
maxpool_shape = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
updated_arr = np.zeros((maxpool_shape, maxpool_shape))
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
j = 0
mat_j = 0
return updated_arr
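# For example, a 4x4 input pooled with size=2 and stride=2 yields a 2x2 output,
# since (4 - 2) // 2 + 1 == 2 along each axis.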
def avgpooling( arr , size , stride):
arr = np.array(arr)
if arr.shape[0] != arr.shape[1]:
raise ValueError("""The input array is not a square matrix""")
i = 0
j = 0
mat_i = 0
mat_j = 0
# compute the shape of the output matrix
avgpool_shape = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
updated_arr = np.zeros((avgpool_shape, avgpool_shape))
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
j = 0
mat_j = 0
return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='avgpooling', verbose=True)
# Loading the image
image = Image.open('path_to_image')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
# fmt: off
MORSE_CODE_DICT = {
'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.', 'G': '--.',
'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..', 'M': '--', 'N': '-.',
'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.', 'S': '...', 'T': '-', 'U': '..-',
'V': '...-', 'W': '.--', 'X': '-..-', 'Y': '-.--', 'Z': '--..', '1': '.----',
'2': '..---', '3': '...--', '4': '....-', '5': '.....', '6': '-....', '7': '--...',
'8': '---..', '9': '----.', '0': '-----', '&': '.-...', '@': '.--.-.',
':': '---...', ',': '--..--', '.': '.-.-.-', '\'': '.----.', '"': '.-..-.',
'?': '..--..', '/': '-..-.', '=': '-...-', '+': '.-.-.', '-': '-....-',
'(': '-.--.', ')': '-.--.-', '!': '-.-.--', ' ': '/'
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt( message :str) -> str:
return " ".join(MORSE_CODE_DICT[char] for char in message.upper())
def decrypt( message :str) -> str:
return "".join(REVERSE_DICT[char] for char in message.split())
def main( ) -> None:
message = """Morse code here!"""
print(message)
message = encrypt(message)
print(message)
message = decrypt(message)
print(message)
if __name__ == "__main__":
main()
| 83 | 0 |
'''simple docstring'''
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config( swin_name) -> SwinConfig:
config = SwinConfig()
name_split = swin_name.split("""_""")
model_size = name_split[1]
img_size = int(name_split[4])
window_size = int(name_split[3][-1])
if model_size == "tiny":
embed_dim = 96
depths = (2, 2, 6, 2)
num_heads = (3, 6, 12, 24)
elif model_size == "small":
embed_dim = 96
depths = (2, 2, 18, 2)
num_heads = (3, 6, 12, 24)
elif model_size == "base":
embed_dim = 128
depths = (2, 2, 18, 2)
num_heads = (4, 8, 16, 32)
else:
embed_dim = 192
depths = (2, 2, 18, 2)
num_heads = (6, 12, 24, 48)
if "in22k" in swin_name:
num_classes = 21_841
else:
num_classes = 1_000
repo_id = """huggingface/label-files"""
filename = """imagenet-1k-id2label.json"""
id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""") , """r"""))
id2label = {int(k): v for k, v in id2label.items()}
config.id2label = id2label
config.label2id = {v: k for k, v in id2label.items()}
config.image_size = img_size
config.num_labels = num_classes
config.embed_dim = embed_dim
config.depths = depths
config.num_heads = num_heads
config.window_size = window_size
return config
def rename_key( name) -> str:
if "patch_embed.proj" in name:
name = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""")
if "patch_embed.norm" in name:
name = name.replace("""patch_embed.norm""" , """embeddings.norm""")
if "layers" in name:
name = """encoder.""" + name
if "attn.proj" in name:
name = name.replace("""attn.proj""" , """attention.output.dense""")
if "attn" in name:
name = name.replace("""attn""" , """attention.self""")
if "norm1" in name:
name = name.replace("""norm1""" , """layernorm_before""")
if "norm2" in name:
name = name.replace("""norm2""" , """layernorm_after""")
if "mlp.fc1" in name:
name = name.replace("""mlp.fc1""" , """intermediate.dense""")
if "mlp.fc2" in name:
name = name.replace("""mlp.fc2""" , """output.dense""")
if name == "norm.weight":
name = """layernorm.weight"""
if name == "norm.bias":
name = """layernorm.bias"""
if "head" in name:
name = name.replace("""head""" , """classifier""")
else:
name = """swin.""" + name
return name
def convert_state_dict( orig_state_dict , model):
for key in orig_state_dict.copy().keys():
val = orig_state_dict.pop(key)
if "mask" in key:
continue
elif "qkv" in key:
key_split = key.split(""".""")
layer_num = int(key_split[1])
block_num = int(key_split[3])
dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
orig_state_dict[F'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight'''] = val[:dim, :]
orig_state_dict[F'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight'''] = val[dim : dim * 2, :]
orig_state_dict[F'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight'''] = val[-dim:, :]
else:
orig_state_dict[F'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias'''] = val[:dim]
orig_state_dict[F'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias'''] = val[dim : dim * 2]
orig_state_dict[F'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias'''] = val[-dim:]
else:
orig_state_dict[rename_key(key)] = val
return orig_state_dict
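# Note: timm stores the attention projections as one fused qkv tensor of shape (3 * dim, dim)
# (or (3 * dim,) for biases), so the val[:dim], val[dim : dim * 2] and val[-dim:] slices above
# recover the separate query, key and value parameters expected by the Hugging Face model.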
def convert_swin_checkpoint( swin_name , pytorch_dump_folder_path):
timm_model = timm.create_model(swin_name , pretrained=True)
timm_model.eval()
config = get_swin_config(swin_name)
model = SwinForImageClassification(config)
model.eval()
new_state_dict = convert_state_dict(timm_model.state_dict() , model)
model.load_state_dict(new_state_dict)
url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
image_processor = AutoImageProcessor.from_pretrained("""microsoft/{}""".format(swin_name.replace("""_""" , """-""")))
image = Image.open(requests.get(url , stream=True).raw)
inputs = image_processor(images=image , return_tensors="""pt""")
timm_outs = timm_model(inputs["""pixel_values"""])
hf_outs = model(**inputs).logits
assert torch.allclose(timm_outs , hf_outs , atol=1E-3)
print(F'''Saving model {swin_name} to {pytorch_dump_folder_path}''')
model.save_pretrained(pytorch_dump_folder_path)
print(F'''Saving image processor to {pytorch_dump_folder_path}''')
image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swin_name',
default='swin_tiny_patch4_window7_224',
type=str,
help='Name of the Swin timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 700 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_jukebox': [
'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP',
'JukeboxConfig',
'JukeboxPriorConfig',
'JukeboxVQVAEConfig',
],
'tokenization_jukebox': ['JukeboxTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_jukebox'] = [
'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'JukeboxModel',
'JukeboxPreTrainedModel',
'JukeboxVQVAE',
'JukeboxPrior',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
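# Note: the _LazyModule indirection above defers the heavy torch-dependent imports until an
# attribute is first accessed; the TYPE_CHECKING branch exists only so static type checkers
# and IDEs can still resolve the names.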
| 83 | 0 |
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
logger = logging.get_logger(__name__)
class a ( ChineseCLIPImageProcessor ):
"""simple docstring"""
def __init__( self , *args , **kwargs ) -> None:
warnings.warn(
"""The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use ChineseCLIPImageProcessor instead.""" , FutureWarning , )
super().__init__(*args , **kwargs )
| 701 | # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class a ( PipelineTool ):
"""simple docstring"""
default_checkpoint = '''philschmid/bart-large-cnn-samsum'''
description = (
'''This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '''
'''and returns a summary of the text.'''
)
name = '''summarizer'''
pre_processor_class = AutoTokenizer
model_class = AutoModelForSeqaSeqLM
inputs = ['''text''']
outputs = ['''text''']
def encode( self , text ):
return self.pre_processor(text , return_tensors="""pt""" , truncation=True )
def forward( self , inputs ):
return self.model.generate(**inputs )[0]
def decode( self , outputs ):
return self.pre_processor.decode(outputs , skip_special_tokens=True , clean_up_tokenization_spaces=True )
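# A minimal usage sketch (the input text is hypothetical and assumes the checkpoint above is
# downloadable); calling the tool runs encode -> forward -> decode:
#
# tool = a()
# summary = tool("Jeff: Can I train a transformers model on Amazon SageMaker? ...")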
| 83 | 0 |
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)
_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter( formatter_cls :type , format_type :Optional[str] , aliases :Optional[List[str]] = None , ):
aliases = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
F'''Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})''')
_FORMAT_TYPES[format_type] = formatter_cls
for alias in set(aliases + [format_type]):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
F'''Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})''')
_FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter( unavailable_error :Exception , format_type :Optional[str] , aliases :Optional[List[str]] = None):
aliases = aliases if aliases is not None else []
for alias in set(aliases + [format_type]):
_FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
_torch_error = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
_register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
_tf_error = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.')
_register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, 'jax', aliases=[])
else:
_jax_error = ValueError('JAX needs to be installed to be able to return JAX arrays.')
_register_unavailable_formatter(_jax_error, 'jax', aliases=[])
def get_format_type_from_alias( format_type :Optional[str]) -> Optional[str]:
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def get_formatter( format_type :Optional[str] , **format_kwargs) -> Formatter:
format_type = get_format_type_from_alias(format_type)
if format_type in _FORMAT_TYPES:
return _FORMAT_TYPES[format_type](**format_kwargs)
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
F'''Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got \'{format_type}\'''')
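# For example, get_formatter("np") resolves the "np" alias to "numpy" and returns a
# NumpyFormatter instance, while get_formatter("torch") raises the registered error
# when PyTorch is not installed.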
| 702 | import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
rename_keys_prefix = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
ACCEPTABLE_CHECKPOINTS = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def load_state_dict( checkpoint_path):
sd = torch.load(checkpoint_path , map_location="""cpu""")
return sd
def get_new_dict( d , config , rename_keys_prefix=rename_keys_prefix):
new_d = OrderedDict()
new_d["""visual_bert.embeddings.position_ids"""] = torch.arange(config.max_position_embeddings).expand((1, -1))
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
new_key = key
for name_pair in rename_keys_prefix:
new_key = new_key.replace(name_pair[0] , name_pair[1])
new_d[new_key] = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`, but was added separately
new_d["""cls.predictions.decoder.bias"""] = new_d["""cls.predictions.bias"""]
return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint( checkpoint_path , pytorch_dump_folder_path):
assert (
checkpoint_path.split("""/""")[-1] in ACCEPTABLE_CHECKPOINTS
), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
# Get Config
if "pre" in checkpoint_path:
model_type = """pretraining"""
if "vcr" in checkpoint_path:
config_params = {"""visual_embedding_dim""": 512}
elif "vqa_advanced" in checkpoint_path:
config_params = {"""visual_embedding_dim""": 2_048}
elif "vqa" in checkpoint_path:
config_params = {"""visual_embedding_dim""": 2_048}
elif "nlvr" in checkpoint_path:
config_params = {"""visual_embedding_dim""": 1_024}
else:
raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''')
else:
if "vcr" in checkpoint_path:
config_params = {"""visual_embedding_dim""": 512}
model_type = """multichoice"""
elif "vqa_advanced" in checkpoint_path:
config_params = {"""visual_embedding_dim""": 2_048}
model_type = """vqa_advanced"""
elif "vqa" in checkpoint_path:
config_params = {"""visual_embedding_dim""": 2_048, """num_labels""": 3_129}
model_type = """vqa"""
elif "nlvr" in checkpoint_path:
config_params = {
"""visual_embedding_dim""": 1_024,
"""num_labels""": 2,
}
model_type = """nlvr"""
config = VisualBertConfig(**config_params)
# Load State Dict
state_dict = load_state_dict(checkpoint_path)
new_state_dict = get_new_dict(state_dict , config)
if model_type == "pretraining":
model = VisualBertForPreTraining(config)
elif model_type == "vqa":
model = VisualBertForQuestionAnswering(config)
elif model_type == "nlvr":
model = VisualBertForVisualReasoning(config)
elif model_type == "multichoice":
model = VisualBertForMultipleChoice(config)
model.load_state_dict(new_state_dict)
# Save Checkpoints
Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 83 | 0 |
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
class a ( RagRetriever ):
"""simple docstring"""
def __init__( self , config , question_encoder_tokenizer , generator_tokenizer , index=None ):
super().__init__(
config , question_encoder_tokenizer=question_encoder_tokenizer , generator_tokenizer=generator_tokenizer , index=index , init_retrieval=False , )
self.process_group = None
def init_retrieval( self , distributed_port ):
logger.info("""initializing retrieval""" )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info("""dist initialized""" )
# needs to be set manually
os.environ["""GLOO_SOCKET_IFNAME"""] = self._infer_socket_ifname()
# avoid clash with the NCCL port
os.environ["""MASTER_PORT"""] = str(distributed_port + 1 )
self.process_group = dist.new_group(ranks=None , backend="""gloo""" )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("""dist not initialized / main""" )
self.index.init_index()
# all processes wait untill the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def _is_main( self ):
return dist.get_rank(group=self.process_group ) == 0
def _scattered( self , scatter_list , target_shape , target_type=torch.float32 ):
target_tensor = torch.empty(target_shape , dtype=target_type )
dist.scatter(target_tensor , src=0 , scatter_list=scatter_list , group=self.process_group )
return target_tensor
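# Note: with src=0, torch.distributed.scatter expects rank 0 to supply scatter_list (one
# chunk per process) and every rank - including rank 0 - receives exactly one chunk into
# target_tensor.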
def _infer_socket_ifname( self ):
addrs = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
ifname = next((addr for addr in addrs if addr.startswith("""e""" )) , None )
return ifname
def retrieve( self , question_hidden_states , n_docs ) -> Tuple[np.ndarray, List[dict]]:
# single GPU training
if not dist.is_initialized():
doc_ids , retrieved_doc_embeds = self._main_retrieve(question_hidden_states , n_docs )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids )
# distributed training
world_size = dist.get_world_size(group=self.process_group )
# gather logic
gather_list = None
if self._is_main():
gather_list = [torch.empty(question_hidden_states.shape , dtype=torch.float32 ) for _ in range(world_size )]
dist.gather(torch.tensor(question_hidden_states ) , dst=0 , gather_list=gather_list , group=self.process_group )
# scatter logic
n_queries = question_hidden_states.shape[0]
scatter_ids = []
scatter_vectors = []
if self._is_main():
assert len(gather_list ) == world_size
ids , vectors = self._main_retrieve(torch.cat(gather_list ).numpy() , n_docs )
ids , vectors = torch.tensor(ids ), torch.tensor(vectors )
scatter_ids = self._chunk_tensor(ids , n_queries )
scatter_vectors = self._chunk_tensor(vectors , n_queries )
doc_ids = self._scattered(scatter_ids , [n_queries, n_docs] , target_type=torch.int64 )
retrieved_doc_embeds = self._scattered(scatter_vectors , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids )
| 703 | from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class a ( TestCase ):
"""simple docstring"""
def _create_example_records( self ):
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def _create_example_dict( self ):
data = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]}
return Dataset.from_dict(data )
def test_create( self ):
example_records = self._create_example_records()
dset = Dataset.from_list(example_records )
self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] )
for i, r in enumerate(dset ):
self.assertDictEqual(r , example_records[i] )
def test_list_dict_equivalent( self ):
example_records = self._create_example_records()
dset = Dataset.from_list(example_records )
dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def test_uneven_records( self ): # checks what happens with missing columns
uneven_records = [{"""col_1""": 1}, {"""col_2""": """x"""}]
dset = Dataset.from_list(uneven_records )
self.assertDictEqual(dset[0] , {"""col_1""": 1} )
self.assertDictEqual(dset[1] , {"""col_1""": None} ) # NB: first record is used for columns
def test_variable_list_records( self ): # checks if the type can be inferred from the second record
records = [{"""col_1""": []}, {"""col_1""": [1, 2]}]
dset = Dataset.from_list(records )
self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) )
def test_create_empty( self ):
dset = Dataset.from_list([] )
self.assertEqual(len(dset ) , 0 )
self.assertListEqual(dset.column_names , [] )
| 83 | 0 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
"""simple docstring"""
def __init__( self , components = None ) -> None:
if components is None:
components = []
self.__components = list(components )
def __len__( self ) -> int:
return len(self.__components )
def __str__( self ) -> str:
return "(" + ",".join(map(lowerCAmelCase_ , self.__components ) ) + ")"
def __add__( self , other ) -> Vector:
size = len(self )
if size == len(other ):
result = [self.__components[i] + other.component(i ) for i in range(size )]
return Vector(result )
else:
raise Exception("""must have the same size""" )
def __sub__( self , other ) -> Vector:
size = len(self )
if size == len(other ):
result = [self.__components[i] - other.component(i ) for i in range(size )]
return Vector(result )
else: # error case
raise Exception("""must have the same size""" )
@overload
def __mul__( self , lowerCAmelCase_ ) -> Vector:
...
@overload
def __mul__( self , lowerCAmelCase_ ) -> float:
...
def __mul__( self , lowerCAmelCase_ ) -> float | Vector:
if isinstance(lowerCAmelCase_ , (float, int) ):
_A = [c * other for c in self.__components]
return Vector(lowerCAmelCase_ )
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and len(self ) == len(lowerCAmelCase_ ):
_A = len(self )
_A = [self.__components[i] * other.component(lowerCAmelCase_ ) for i in range(lowerCAmelCase_ )]
return sum(lowerCAmelCase_ )
else: # error case
raise Exception("""invalid operand!""" )
def UpperCAmelCase ( self ) -> Vector:
return Vector(self.__components )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> float:
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception("""index out of range""" )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> None:
assert -len(self.__components ) <= pos < len(self.__components )
_A = value
def UpperCAmelCase ( self ) -> float:
if len(self.__components ) == 0:
raise Exception("""Vector is empty""" )
_A = [c**2 for c in self.__components]
return math.sqrt(sum(lowerCAmelCase_ ) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = False ) -> float:
_A = self * other
_A = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def snake_case ( snake_case__ :int) -> Vector:
assert isinstance(snake_case__ , snake_case__)
return Vector([0] * dimension)
def snake_case ( snake_case__ :int , snake_case__ :int) -> Vector:
assert isinstance(snake_case__ , snake_case__) and (isinstance(snake_case__ , snake_case__))
_A = [0] * dimension
_A = 1
return Vector(snake_case__)
def snake_case ( snake_case__ :float , snake_case__ :Vector , snake_case__ :Vector) -> Vector:
assert (
isinstance(snake_case__ , snake_case__)
and isinstance(snake_case__ , snake_case__)
and (isinstance(snake_case__ , (int, float)))
)
return x * scalar + y
def snake_case ( snake_case__ :int , snake_case__ :int , snake_case__ :int) -> Vector:
random.seed(snake_case__)
_A = [random.randint(snake_case__ , snake_case__) for _ in range(snake_case__)]
return Vector(snake_case__)
class a :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> None:
_A = matrix
_A = w
_A = h
def __str__( self ) -> str:
_A = """"""
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self , lowerCAmelCase_ ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
_A = []
for i in range(self.__height ):
_A = [
self.__matrix[i][j] + other.component(lowerCAmelCase_ , lowerCAmelCase_ )
for j in range(self.__width )
]
matrix.append(lowerCAmelCase_ )
return Matrix(lowerCAmelCase_ , self.__width , self.__height )
else:
raise Exception("""matrix must have the same dimension!""" )
def __sub__( self , lowerCAmelCase_ ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
_A = []
for i in range(self.__height ):
_A = [
self.__matrix[i][j] - other.component(lowerCAmelCase_ , lowerCAmelCase_ )
for j in range(self.__width )
]
matrix.append(lowerCAmelCase_ )
return Matrix(lowerCAmelCase_ , self.__width , self.__height )
else:
raise Exception("""matrices must have the same dimension!""" )
@overload
def __mul__( self , lowerCAmelCase_ ) -> Matrix:
...
@overload
def __mul__( self , lowerCAmelCase_ ) -> Vector:
...
def __mul__( self , lowerCAmelCase_ ) -> Vector | Matrix:
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): # matrix-vector
if len(lowerCAmelCase_ ) == self.__width:
_A = zero_vector(self.__height )
for i in range(self.__height ):
_A = [
self.__matrix[i][j] * other.component(lowerCAmelCase_ )
for j in range(self.__width )
]
ans.change_component(lowerCAmelCase_ , sum(lowerCAmelCase_ ) )
return ans
else:
raise Exception(
"""vector must have the same size as the """
"""number of columns of the matrix!""" )
elif isinstance(lowerCAmelCase_ , (int, float) ): # matrix-scalar
_A = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(lowerCAmelCase_ , self.__width , self.__height )
return None
def UpperCAmelCase ( self ) -> int:
return self.__height
def UpperCAmelCase ( self ) -> int:
return self.__width
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> float:
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception("""change_component: indices out of bounds""" )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> None:
if 0 <= x < self.__height and 0 <= y < self.__width:
_A = value
else:
raise Exception("""change_component: indices out of bounds""" )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> float:
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
_A = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(lowerCAmelCase_ ) ):
_A = minor[i][:y] + minor[i][y + 1 :]
return Matrix(lowerCAmelCase_ , self.__width - 1 , self.__height - 1 ).determinant()
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> float:
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(lowerCAmelCase_ , lowerCAmelCase_ )
else:
raise Exception("""Indices out of bounds""" )
def UpperCAmelCase ( self ) -> float:
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if self.__height < 1:
raise Exception("""Matrix has no element""" )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
_A = [
self.__matrix[0][y] * self.cofactor(0 , lowerCAmelCase_ ) for y in range(self.__width )
]
return sum(lowerCAmelCase_ )
def snake_case ( snake_case__ :int) -> Matrix:
_A = [[0] * n for _ in range(snake_case__)]
return Matrix(snake_case__ , snake_case__ , snake_case__)
def snake_case ( snake_case__ :int , snake_case__ :int , snake_case__ :int , snake_case__ :int) -> Matrix:
random.seed(snake_case__)
_A = [
[random.randint(snake_case__ , snake_case__) for _ in range(snake_case__)] for _ in range(snake_case__)
]
return Matrix(snake_case__ , snake_case__ , snake_case__)
| 704 | def snake_case ( snake_case__ :int = 1_000_000) -> int:
_A = set(range(3 , snake_case__ , 2))
primes.add(2)
for p in range(3 , snake_case__ , 2):
if p not in primes:
continue
primes.difference_update(set(range(p * p , snake_case__ , snake_case__)))
_A = [float(snake_case__) for n in range(limit + 1)]
for p in primes:
for n in range(snake_case__ , limit + 1 , snake_case__):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:]))
if __name__ == "__main__":
print(F'''{solution() = }''')
| 83 | 0 |
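The function closing the row above sieves primes and then scales phi[n] by (1 - 1/p) for each prime factor, i.e. Euler's product formula for the totient; summing phi(n) for 2 <= n <= limit counts the reduced proper fractions with denominator at most limit. An integer-only reconstruction with readable names (mine), checked on a small limit:
def totient_sum(limit: int) -> int:
    # phi[n] starts at n; for every prime p dividing n it is scaled by (1 - 1/p)
    phi = list(range(limit + 1))
    for p in range(2, limit + 1):
        if phi[p] == p:  # p was never scaled down, so p is prime
            for n in range(p, limit + 1, p):
                phi[n] -= phi[n] // p
    return sum(phi[2:])

# phi(2..10) = 1, 2, 2, 4, 2, 6, 4, 6, 4 -> 31
assert totient_sum(10) == 31
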
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class a ( __lowerCAmelCase ):
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]:
super().__init__()
# make sure scheduler can always be converted to DDIM
_A = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ )
@torch.no_grad()
def __call__( self , lowerCAmelCase_ = 1 , lowerCAmelCase_ = None , lowerCAmelCase_ = 0.0 , lowerCAmelCase_ = 50 , lowerCAmelCase_ = None , lowerCAmelCase_ = "pil" , lowerCAmelCase_ = True , ) -> Union[ImagePipelineOutput, Tuple]:
# Sample gaussian noise to begin loop
if isinstance(self.unet.config.sample_size , lowerCAmelCase_ ):
_A = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
_A = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and len(lowerCAmelCase_ ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(lowerCAmelCase_ )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
_A = randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(lowerCAmelCase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
_A = self.unet(lowerCAmelCase_ , lowerCAmelCase_ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
_A = self.scheduler.step(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , eta=lowerCAmelCase_ , use_clipped_model_output=lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample
_A = (image / 2 + 0.5).clamp(0 , 1 )
_A = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_A = self.numpy_to_pil(lowerCAmelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCAmelCase_ )
| 705 | import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class a ( __lowerCAmelCase ):
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=7 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=99 , lowerCAmelCase_=32 , lowerCAmelCase_=5 , lowerCAmelCase_=4 , lowerCAmelCase_=37 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=5_12 , lowerCAmelCase_=16 , lowerCAmelCase_=2 , lowerCAmelCase_=0.02 , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_="None" , lowerCAmelCase_=3 , lowerCAmelCase_=4 , lowerCAmelCase_=None , ) -> Union[str, Any]:
_A = parent
_A = batch_size
_A = seq_length
_A = is_training
_A = use_input_mask
_A = use_token_type_ids
_A = use_labels
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = type_vocab_size
_A = type_sequence_label_size
_A = initializer_range
_A = num_labels
_A = num_choices
_A = relative_attention
_A = position_biased_input
_A = pos_att_type
_A = scope
def UpperCAmelCase ( self ) -> Dict:
_A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A = None
if self.use_input_mask:
_A = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_A = None
if self.use_token_type_ids:
_A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_A = None
_A = None
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_A = ids_tensor([self.batch_size] , self.num_choices )
_A = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase ( self ) -> Optional[int]:
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Any:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]:
_A = DebertaVaModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0]
_A = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0]
_A = model(lowerCAmelCase_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
_A = DebertaVaForMaskedLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any:
_A = self.num_labels
_A = DebertaVaForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
_A = self.num_labels
_A = DebertaVaForTokenClassification(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[int]:
_A = DebertaVaForQuestionAnswering(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
_A = DebertaVaForMultipleChoice(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase ( self ) -> Optional[int]:
_A = self.prepare_config_and_inputs()
(_A , _A , _A , _A , _A , _A , _A) = config_and_inputs
_A = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class a ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase :int = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
lowerCamelCase :str = (
{
'''feature-extraction''': DebertaVaModel,
'''fill-mask''': DebertaVaForMaskedLM,
'''question-answering''': DebertaVaForQuestionAnswering,
'''text-classification''': DebertaVaForSequenceClassification,
'''token-classification''': DebertaVaForTokenClassification,
'''zero-shot''': DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase :str = True
lowerCamelCase :Union[str, Any] = False
lowerCamelCase :Optional[int] = False
lowerCamelCase :List[str] = False
lowerCamelCase :str = False
def UpperCAmelCase ( self ) -> Optional[int]:
_A = DebertaVaModelTester(self )
_A = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 )
def UpperCAmelCase ( self ) -> List[str]:
self.config_tester.run_common_tests()
def UpperCAmelCase ( self ) -> List[str]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Any:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> int:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Optional[int]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*lowerCAmelCase_ )
@slow
def UpperCAmelCase ( self ) -> Any:
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = DebertaVaModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class a ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip(reason="""Model not available yet""" )
def UpperCAmelCase ( self ) -> int:
pass
@slow
def UpperCAmelCase ( self ) -> Optional[Any]:
_A = DebertaVaModel.from_pretrained("""microsoft/deberta-v2-xlarge""" )
_A = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
_A = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0]
# compare the actual values for a slice.
_A = torch.tensor(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase_ , atol=1E-4 ) , F'''{output[:, 1:4, 1:4]}''' )
| 83 | 0 |
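The slow integration test above checks a 3x3 slice of microsoft/deberta-v2-xlarge hidden states against hard-coded values. Stripped to its essentials, and using the public transformers class name (DebertaV2Model) rather than the renamed identifiers in this dump, the check is roughly:
import torch
from transformers import DebertaV2Model

model = DebertaV2Model.from_pretrained("microsoft/deberta-v2-xlarge")
input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
attention_mask = torch.ones_like(input_ids)
with torch.no_grad():
    hidden = model(input_ids, attention_mask=attention_mask)[0]
# shape (batch, seq_len, hidden_size); the test compares hidden[:, 1:4, 1:4]
# against known values with atol=1e-4
print(hidden.shape, hidden[:, 1:4, 1:4])
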
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def snake_case ( snake_case__ :str , snake_case__ :str , snake_case__ :str , snake_case__ :PreTrainedTokenizer , snake_case__ :int , snake_case__ :Optional[int] = None , ) -> Optional[int]:
_A = {}
if train_file is not None:
_A = [train_file]
if eval_file is not None:
_A = [eval_file]
if test_file is not None:
_A = [test_file]
_A = datasets.load_dataset("""csv""" , data_files=snake_case__)
_A = list(ds[list(files.keys())[0]].features.keys())
_A = features_name.pop(snake_case__)
_A = list(set(ds[list(files.keys())[0]][label_name]))
_A = {label: i for i, label in enumerate(snake_case__)}
_A = tokenizer.model_input_names
_A = {}
if len(snake_case__) == 1:
for k in files.keys():
_A = ds[k].map(
lambda snake_case__: tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=snake_case__ , max_length=snake_case__ , padding="""max_length""") , batched=snake_case__ , )
elif len(snake_case__) == 2:
for k in files.keys():
_A = ds[k].map(
lambda snake_case__: tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=snake_case__ , max_length=snake_case__ , padding="""max_length""" , ) , batched=snake_case__ , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
_A = {k: v for k, v in ex.items() if k in input_names}
_A = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
_A = {k: v for k, v in ex.items() if k in input_names}
_A = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
_A = {k: v for k, v in ex.items() if k in input_names}
_A = labelaid[ex[label_name]]
yield (d, label)
_A = (
tf.data.Dataset.from_generator(
snake_case__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
_A = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))
_A = (
tf.data.Dataset.from_generator(
snake_case__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
_A = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))
_A = (
tf.data.Dataset.from_generator(
snake_case__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
_A = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))
return train_ds, val_ds, test_ds, labelaid
_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
@dataclass
class a :
"""simple docstring"""
lowerCamelCase :int = field(metadata={'''help''': '''Which column contains the label'''} )
lowerCamelCase :str = field(default=__lowerCAmelCase , metadata={'''help''': '''The path of the training file'''} )
lowerCamelCase :Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''The path of the development file'''} )
lowerCamelCase :Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''The path of the test file'''} )
lowerCamelCase :int = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
lowerCamelCase :bool = field(
default=__lowerCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class a :
"""simple docstring"""
lowerCamelCase :str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
lowerCamelCase :Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
lowerCamelCase :Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
lowerCamelCase :bool = field(default=__lowerCAmelCase , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
lowerCamelCase :Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def snake_case ( ) -> int:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_A = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
_A , _A , _A = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
""" --overwrite_output_dir to overcome.""")
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.info(
F'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, '''
F'''16-bits training: {training_args.fpaa}''')
logger.info(F'''Training/evaluation parameters {training_args}''')
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_A = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_A , _A , _A , _A = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=snake_case__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
_A = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(snake_case__) , labelaid=snake_case__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="""text-classification""" , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
_A = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(""".bin""" in model_args.model_name_or_path) , config=snake_case__ , cache_dir=model_args.cache_dir , )
def compute_metrics(snake_case__ :EvalPrediction) -> Dict:
_A = np.argmax(p.predictions , axis=1)
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
_A = TFTrainer(
model=snake_case__ , args=snake_case__ , train_dataset=snake_case__ , eval_dataset=snake_case__ , compute_metrics=snake_case__ , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
_A = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""")
_A = trainer.evaluate()
_A = os.path.join(training_args.output_dir , """eval_results.txt""")
with open(snake_case__ , """w""") as writer:
logger.info("""***** Eval results *****""")
for key, value in result.items():
logger.info(F''' {key} = {value}''')
writer.write(F'''{key} = {value}\n''')
results.update(snake_case__)
return results
if __name__ == "__main__":
main()
| 706 | def snake_case ( snake_case__ :int , snake_case__ :int) -> int:
return int(input_a == input_a == 0)
def snake_case ( ) -> None:
print("""Truth Table of NOR Gate:""")
print("""| Input 1 | Input 2 | Output |""")
print(F'''| 0 | 0 | {nor_gate(0 , 0)} |''')
print(F'''| 0 | 1 | {nor_gate(0 , 1)} |''')
print(F'''| 1 | 0 | {nor_gate(1 , 0)} |''')
print(F'''| 1 | 1 | {nor_gate(1 , 1)} |''')
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 83 | 0 |
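Note that the identifier rewriting above has collapsed the two NOR inputs into a single name, which makes the gate as printed always compare a value to itself. A corrected, runnable reconstruction:
def nor_gate(input_1: int, input_2: int) -> int:
    # NOR outputs 1 only when both inputs are 0
    return int(input_1 == input_2 == 0)

assert [nor_gate(a, b) for a, b in ((0, 0), (0, 1), (1, 0), (1, 1))] == [1, 0, 0, 0]
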
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Dict:
_A = [
"""safety_checker/pytorch_model.bin""",
"""safety_checker/model.safetensors""",
"""vae/diffusion_pytorch_model.bin""",
"""vae/diffusion_pytorch_model.safetensors""",
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
self.assertTrue(is_safetensors_compatible(lowerCAmelCase_ ) )
def UpperCAmelCase ( self ) -> Tuple:
_A = [
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
self.assertTrue(is_safetensors_compatible(lowerCAmelCase_ ) )
def UpperCAmelCase ( self ) -> Optional[Any]:
_A = [
"""safety_checker/pytorch_model.bin""",
"""safety_checker/model.safetensors""",
"""vae/diffusion_pytorch_model.bin""",
"""vae/diffusion_pytorch_model.safetensors""",
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
"""unet/diffusion_pytorch_model.bin""",
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(lowerCAmelCase_ ) )
def UpperCAmelCase ( self ) -> Any:
_A = [
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
]
self.assertTrue(is_safetensors_compatible(lowerCAmelCase_ ) )
def UpperCAmelCase ( self ) -> int:
_A = [
"""safety_checker/pytorch_model.bin""",
"""safety_checker/model.safetensors""",
"""vae/diffusion_pytorch_model.bin""",
"""vae/diffusion_pytorch_model.safetensors""",
"""text_encoder/pytorch_model.bin""",
# Removed: 'text_encoder/model.safetensors',
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
self.assertFalse(is_safetensors_compatible(lowerCAmelCase_ ) )
def UpperCAmelCase ( self ) -> List[Any]:
_A = [
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
"""text_encoder/model.fp16.safetensors""",
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
_A = """fp16"""
self.assertTrue(is_safetensors_compatible(lowerCAmelCase_ , variant=lowerCAmelCase_ ) )
def UpperCAmelCase ( self ) -> Optional[Any]:
_A = [
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
_A = """fp16"""
self.assertTrue(is_safetensors_compatible(lowerCAmelCase_ , variant=lowerCAmelCase_ ) )
def UpperCAmelCase ( self ) -> Any:
# pass variant but use the non-variant filenames
_A = [
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
_A = """fp16"""
self.assertTrue(is_safetensors_compatible(lowerCAmelCase_ , variant=lowerCAmelCase_ ) )
def UpperCAmelCase ( self ) -> Dict:
_A = [
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
"""text_encoder/model.fp16.safetensors""",
"""unet/diffusion_pytorch_model.fp16.bin""",
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
_A = """fp16"""
self.assertFalse(is_safetensors_compatible(lowerCAmelCase_ , variant=lowerCAmelCase_ ) )
def UpperCAmelCase ( self ) -> Tuple:
_A = [
"""text_encoder/pytorch_model.fp16.bin""",
"""text_encoder/model.fp16.safetensors""",
]
_A = """fp16"""
self.assertTrue(is_safetensors_compatible(lowerCAmelCase_ , variant=lowerCAmelCase_ ) )
def UpperCAmelCase ( self ) -> List[Any]:
# pass variant but use the non-variant filenames
_A = [
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
]
_A = """fp16"""
self.assertTrue(is_safetensors_compatible(lowerCAmelCase_ , variant=lowerCAmelCase_ ) )
def UpperCAmelCase ( self ) -> List[str]:
_A = [
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
# 'text_encoder/model.fp16.safetensors',
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
_A = """fp16"""
self.assertFalse(is_safetensors_compatible(lowerCAmelCase_ , variant=lowerCAmelCase_ ) )
| 707 | import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class a :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=sys.maxsize ) -> str:
_A = """bilinear"""
_A = max_size
_A = short_edge_length
def __call__( self , lowerCAmelCase_ ) -> Optional[Any]:
_A = []
for img in imgs:
_A , _A = img.shape[:2]
# later: provide list and randomly choose index for resize
_A = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
_A = size * 1.0 / min(lowerCAmelCase_ , lowerCAmelCase_ )
if h < w:
_A , _A = size, scale * w
else:
_A , _A = scale * h, size
if max(lowerCAmelCase_ , lowerCAmelCase_ ) > self.max_size:
_A = self.max_size * 1.0 / max(lowerCAmelCase_ , lowerCAmelCase_ )
_A = newh * scale
_A = neww * scale
_A = int(neww + 0.5 )
_A = int(newh + 0.5 )
if img.dtype == np.uinta:
_A = Image.fromarray(lowerCAmelCase_ )
_A = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
_A = np.asarray(lowerCAmelCase_ )
else:
_A = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # (h, w, c) -> (n, c, h, w)
_A = nn.functional.interpolate(
lowerCAmelCase_ , (newh, neww) , mode=self.interp_method , align_corners=lowerCAmelCase_ ).squeeze(0 )
img_augs.append(lowerCAmelCase_ )
return img_augs
class a :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ ) -> List[Any]:
_A = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
_A = cfg.INPUT.FORMAT
_A = cfg.SIZE_DIVISIBILITY
_A = cfg.PAD_VALUE
_A = cfg.INPUT.MAX_SIZE_TEST
_A = cfg.MODEL.DEVICE
_A = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
_A = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
_A = lambda lowerCAmelCase_ : (x - self.pixel_mean) / self.pixel_std
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Tuple:
_A = tuple(max(lowerCAmelCase_ ) for s in zip(*[img.shape for img in images] ) )
_A = [im.shape[-2:] for im in images]
_A = [
nn.functional.pad(
lowerCAmelCase_ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(lowerCAmelCase_ , lowerCAmelCase_ )
]
return torch.stack(lowerCAmelCase_ ), torch.tensor(lowerCAmelCase_ )
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_=False ) -> int:
with torch.no_grad():
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_A = [images]
if single_image:
assert len(lowerCAmelCase_ ) == 1
for i in range(len(lowerCAmelCase_ ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(lowerCAmelCase_ , images.pop(lowerCAmelCase_ ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
lowerCAmelCase_ , torch.as_tensor(img_tensorize(images.pop(lowerCAmelCase_ ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
_A = torch.tensor([im.shape[:2] for im in images] )
_A = self.aug(lowerCAmelCase_ )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
_A = [self.normalizer(lowerCAmelCase_ ) for x in images]
# now pad them to do the following operations
_A , _A = self.pad(lowerCAmelCase_ )
# padding to a size divisibility is not implemented
if self.size_divisibility > 0:
raise NotImplementedError()
# y/x scale factors relative to the original image sizes
_A = torch.true_divide(lowerCAmelCase_ , lowerCAmelCase_ )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def snake_case ( snake_case__ :Optional[int] , snake_case__ :Optional[Any]) -> Tuple:
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def snake_case ( snake_case__ :Optional[int] , snake_case__ :Tuple[int, int]) -> Optional[Any]:
assert torch.isfinite(snake_case__).all(), "Box tensor contains infinite or NaN!"
_A , _A = box_size
tensor[:, 0].clamp_(min=0 , max=snake_case__)
tensor[:, 1].clamp_(min=0 , max=snake_case__)
tensor[:, 2].clamp_(min=0 , max=snake_case__)
tensor[:, 3].clamp_(min=0 , max=snake_case__)
| 83 | 0 |
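The ResizeShortestEdge logic in the row above scales the shorter image edge to a sampled size and then rescales again if the longer edge exceeds max_size. Extracted into a pure function (names are mine), the arithmetic is:
def resized_hw(h: int, w: int, size: int, max_size: int) -> "tuple[int, int]":
    scale = size * 1.0 / min(h, w)          # shorter edge becomes `size`
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    if max(newh, neww) > max_size:          # cap the longer edge
        shrink = max_size * 1.0 / max(newh, neww)
        newh, neww = newh * shrink, neww * shrink
    return int(newh + 0.5), int(neww + 0.5)  # round half up, as above

assert resized_hw(480, 640, 600, 1000) == (600, 800)
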
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :List[str] = '''falcon'''
lowerCamelCase :str = ['''past_key_values''']
def __init__( self , lowerCAmelCase_=6_50_24 , lowerCAmelCase_=45_44 , lowerCAmelCase_=32 , lowerCAmelCase_=71 , lowerCAmelCase_=1E-5 , lowerCAmelCase_=0.02 , lowerCAmelCase_=True , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=None , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=11 , lowerCAmelCase_=11 , **lowerCAmelCase_ , ) -> int:
_A = vocab_size
# Backward compatibility with n_embed kwarg
_A = kwargs.pop("""n_embed""" , lowerCAmelCase_ )
_A = hidden_size if n_embed is None else n_embed
_A = num_hidden_layers
_A = num_attention_heads
_A = layer_norm_epsilon
_A = initializer_range
_A = use_cache
_A = hidden_dropout
_A = attention_dropout
_A = bos_token_id
_A = eos_token_id
_A = num_attention_heads if num_kv_heads is None else num_kv_heads
_A = alibi
_A = new_decoder_architecture
_A = multi_query # Ignored when new_decoder_architecture is True
_A = parallel_attn
_A = bias
super().__init__(bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ )
@property
def UpperCAmelCase ( self ) -> Any:
return self.hidden_size // self.num_attention_heads
@property
def UpperCAmelCase ( self ) -> str:
return not self.alibi
| 708 | from collections import defaultdict
def snake_case ( snake_case__ :int) -> int:
_A = 1
_A = True
for v in tree[start]:
if v not in visited:
ret += dfs(snake_case__)
if ret % 2 == 0:
cuts.append(snake_case__)
return ret
def snake_case ( ) -> Any:
dfs(1)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 10, 9
_SCREAMING_SNAKE_CASE = defaultdict(list)
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
| 83 | 0 |
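The even-tree DFS closing the row above has lost its assignment targets to the renaming (in particular, visited is never actually updated), so here is a readable reconstruction of what it computes: the number of edges whose removal splits off an even-sized subtree (HackerRank's "Even Tree"), which for the sample edges is 2, matching the printed len(cuts) - 1.
from collections import defaultdict

def even_tree_cuts(edges, root=1):
    tree = defaultdict(list)
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    cuts = []

    def subtree_size(node, parent):
        size = 1
        for child in tree[node]:
            if child != parent:
                child_size = subtree_size(child, node)
                if child_size % 2 == 0:  # even subtree -> the edge above it is cuttable
                    cuts.append((node, child))
                size += child_size
        return size

    subtree_size(root, 0)
    return len(cuts)

edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
assert even_tree_cuts(edges) == 2
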
_SCREAMING_SNAKE_CASE = [
(1_000, 'M'),
(900, 'CM'),
(500, 'D'),
(400, 'CD'),
(100, 'C'),
(90, 'XC'),
(50, 'L'),
(40, 'XL'),
(10, 'X'),
(9, 'IX'),
(5, 'V'),
(4, 'IV'),
(1, 'I'),
]
def snake_case ( snake_case__ :str) -> int:
_A = {"""I""": 1, """V""": 5, """X""": 10, """L""": 50, """C""": 100, """D""": 500, """M""": 1_000}
_A = 0
_A = 0
while place < len(snake_case__):
if (place + 1 < len(snake_case__)) and (vals[roman[place]] < vals[roman[place + 1]]):
total += vals[roman[place + 1]] - vals[roman[place]]
place += 2
else:
total += vals[roman[place]]
place += 1
return total
def snake_case ( snake_case__ :int) -> str:
_A = []
for arabic, roman in ROMAN:
((_A) , (_A)) = divmod(snake_case__ , snake_case__)
result.append(roman * factor)
if number == 0:
break
return "".join(snake_case__)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 709 | import heapq
def snake_case ( snake_case__ :dict) -> set[int]:
_A = []
# for each node and his adjacency list add them and the rank of the node to queue
# using heapq module the queue will be filled like a Priority Queue
# heapq works with a min priority queue, so I used -1*len(v) to build it
for key, value in graph.items():
# O(log(n))
heapq.heappush(snake_case__ , [-1 * len(snake_case__), (key, value)])
# chosen_vertices = set of chosen vertices
_A = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
_A = heapq.heappop(snake_case__)[1][0]
chosen_vertices.add(snake_case__)
# Remove all arcs adjacent to argmax
for elem in queue:
# if v has no adjacent nodes, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacency list and update its rank
if argmax in elem[1][1]:
_A = elem[1][1].index(snake_case__)
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(snake_case__)
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
_SCREAMING_SNAKE_CASE = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
| 83 | 0 |
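The heap-based routine above is a greedy heuristic (repeatedly pick the highest-degree vertex), so it returns a vertex cover but not necessarily a minimum one. A quick property check that every listed edge is covered, calling the routine by the name its own __main__ block uses (the def line itself was renamed in this dump):
graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
cover = greedy_min_vertex_cover(graph)
for u, neighbours in graph.items():
    for v in neighbours:
        assert u in cover or v in cover, (u, v)  # each edge has an endpoint in the cover
print(cover)
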
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :int = (KDPMaDiscreteScheduler,)
lowerCamelCase :str = 10
def UpperCAmelCase ( self , **lowerCAmelCase_ ) -> str:
_A = {
"""num_train_timesteps""": 11_00,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
}
config.update(**lowerCAmelCase_ )
return config
def UpperCAmelCase ( self ) -> str:
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Optional[Any]:
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=lowerCAmelCase_ , beta_end=lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Union[str, Any]:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> List[Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> str:
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config(prediction_type="""v_prediction""" )
_A = scheduler_class(**lowerCAmelCase_ )
scheduler.set_timesteps(self.num_inference_steps )
_A = self.dummy_model()
_A = self.dummy_sample_deter * scheduler.init_noise_sigma
_A = sample.to(lowerCAmelCase_ )
for i, t in enumerate(scheduler.timesteps ):
_A = scheduler.scale_model_input(lowerCAmelCase_ , lowerCAmelCase_ )
_A = model(lowerCAmelCase_ , lowerCAmelCase_ )
_A = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_A = output.prev_sample
_A = torch.sum(torch.abs(lowerCAmelCase_ ) )
_A = torch.mean(torch.abs(lowerCAmelCase_ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6934E-07 ) < 1E-2
assert abs(result_mean.item() - 6.1112E-10 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 4.693_4286_5017_0972E-07 ) < 1E-2
assert abs(result_mean.item() - 0.0002 ) < 1E-3
def UpperCAmelCase ( self ) -> str:
if torch_device == "mps":
return
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config()
_A = scheduler_class(**lowerCAmelCase_ )
scheduler.set_timesteps(self.num_inference_steps )
_A = self.dummy_model()
_A = self.dummy_sample_deter * scheduler.init_noise_sigma
_A = sample.to(lowerCAmelCase_ )
for i, t in enumerate(scheduler.timesteps ):
_A = scheduler.scale_model_input(lowerCAmelCase_ , lowerCAmelCase_ )
_A = model(lowerCAmelCase_ , lowerCAmelCase_ )
_A = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_A = output.prev_sample
_A = torch.sum(torch.abs(lowerCAmelCase_ ) )
_A = torch.mean(torch.abs(lowerCAmelCase_ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
def UpperCAmelCase ( self ) -> List[Any]:
if torch_device == "mps":
return
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config()
_A = scheduler_class(**lowerCAmelCase_ )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCAmelCase_ )
_A = self.dummy_model()
_A = self.dummy_sample_deter.to(lowerCAmelCase_ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
_A = scheduler.scale_model_input(lowerCAmelCase_ , lowerCAmelCase_ )
_A = model(lowerCAmelCase_ , lowerCAmelCase_ )
_A = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_A = output.prev_sample
_A = torch.sum(torch.abs(lowerCAmelCase_ ) )
_A = torch.mean(torch.abs(lowerCAmelCase_ ) )
if str(lowerCAmelCase_ ).startswith("""cpu""" ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
| 710 | import math
import unittest
def snake_case ( snake_case__ :int) -> bool:
assert isinstance(snake_case__ , snake_case__) and (
number >= 0
), "'number' must be an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(snake_case__) + 1) , 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[Any]:
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def UpperCAmelCase ( self ) -> Dict:
with self.assertRaises(lowerCAmelCase_ ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , """Zero doesn't have any positive factors, primes must have exactly two.""" , )
self.assertFalse(
is_prime(1 ) , """One only has 1 positive factor, primes must have exactly two.""" , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 83 | 0 |
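The trial division above only tests divisors of the form 6k - 1 and 6k + 1 because every prime greater than 3 is congruent to 1 or 5 mod 6 (the other residues are divisible by 2 or 3). A compact restatement with a sanity check:
import math

def is_prime(number: int) -> bool:
    if number < 2:
        return False
    if number < 4:
        return True  # 2 and 3
    if number % 2 == 0 or number % 3 == 0:
        return False
    for i in range(5, int(math.sqrt(number)) + 1, 6):  # i = 6k - 1
        if number % i == 0 or number % (i + 2) == 0:   # i + 2 = 6k + 1
            return False
    return True

assert [n for n in range(50) if is_prime(n)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47]
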
from __future__ import annotations
from scipy.special import comb # type: ignore
class a :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ ) -> Any:
_A = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
_A = len(lowerCAmelCase_ ) - 1
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> list[float]:
assert 0 <= t <= 1, "Time t must be between 0 and 1."
_A = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree , lowerCAmelCase_ ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(lowerCAmelCase_ ) , 5 ) == 1
return output_values
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> tuple[float, float]:
assert 0 <= t <= 1, "Time t must be between 0 and 1."
_A = self.basis_function(lowerCAmelCase_ )
_A = 0.0
_A = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def UpperCAmelCase ( self , lowerCAmelCase_ = 0.01 ) -> str:
from matplotlib import pyplot as plt # type: ignore
_A = [] # x coordinates of points to plot
_A = [] # y coordinates of points to plot
_A = 0.0
while t <= 1:
_A = self.bezier_curve_function(lowerCAmelCase_ )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
_A = [i[0] for i in self.list_of_points]
_A = [i[1] for i in self.list_of_points]
plt.plot(
lowerCAmelCase_ , lowerCAmelCase_ , color="""blue""" , label="""Curve of Degree """ + str(self.degree ) , )
plt.scatter(lowerCAmelCase_ , lowerCAmelCase_ , color="""red""" , label="""Control Points""" )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 711 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE = {'configuration_encoder_decoder': ['EncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['EncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['TFEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['FlaxEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 83 | 0 |
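Looking back at the Bezier implementation that opens this row: the Bernstein basis it builds is a partition of unity, which is exactly what its assert round(sum(...), 5) == 1 enforces, and the curve point is the basis-weighted sum of the control points. A small numerical confirmation for the quadratic example [(0, 0), (5, 5), (5, 0)] at t = 0.5:
from scipy.special import comb

points = [(0, 0), (5, 5), (5, 0)]
degree = len(points) - 1
t = 0.5
basis = [comb(degree, i) * (1 - t) ** (degree - i) * t**i for i in range(degree + 1)]
assert round(sum(basis), 5) == 1  # basis == [0.25, 0.5, 0.25]

x = sum(b * p[0] for b, p in zip(basis, points))
y = sum(b * p[1] for b, p in zip(basis, points))
assert (x, y) == (3.75, 2.5)
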
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class a ( __lowerCAmelCase ):
"""simple docstring"""
def __init__( self , *lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ ) -> List[Any]:
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
_A = eval_examples
_A = post_process_function
def UpperCAmelCase ( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_ = "eval" ) -> Tuple:
_A = self.eval_dataset if eval_dataset is None else eval_dataset
_A = self.get_eval_dataloader(lowerCAmelCase_ )
_A = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
_A = self.compute_metrics
_A = None
_A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
_A = time.time()
try:
_A = eval_loop(
lowerCAmelCase_ , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowerCAmelCase_ , metric_key_prefix=lowerCAmelCase_ , )
finally:
_A = compute_metrics
_A = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
lowerCAmelCase_ , lowerCAmelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
_A = self.post_process_function(lowerCAmelCase_ , lowerCAmelCase_ , output.predictions )
_A = self.compute_metrics(lowerCAmelCase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
_A = metrics.pop(lowerCAmelCase_ )
metrics.update(output.metrics )
else:
_A = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(lowerCAmelCase_ )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
_A = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowerCAmelCase_ )
return metrics
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_ = "test" ) -> Dict:
_A = self.get_test_dataloader(lowerCAmelCase_ )
# Temporarily disable metric computation, we will do it in the loop here.
_A = self.compute_metrics
_A = None
_A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
_A = time.time()
try:
_A = eval_loop(
lowerCAmelCase_ , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowerCAmelCase_ , metric_key_prefix=lowerCAmelCase_ , )
finally:
_A = compute_metrics
_A = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
lowerCAmelCase_ , lowerCAmelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
_A = self.post_process_function(lowerCAmelCase_ , lowerCAmelCase_ , output.predictions , """predict""" )
_A = self.compute_metrics(lowerCAmelCase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
_A = metrics.pop(lowerCAmelCase_ )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowerCAmelCase_ )
| 712 | from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
@add_end_docstrings(__lowerCAmelCase )
class a ( __lowerCAmelCase ):
"""simple docstring"""
def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Optional[Any]:
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
self.check_model_type(lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ ) -> Tuple:
_A , _A = {}, {}
if padding is not None:
_A = padding
if truncation is not None:
_A = truncation
if top_k is not None:
_A = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ = None , **lowerCAmelCase_ ) -> Union[str, Any]:
if isinstance(lowerCAmelCase_ , (Image.Image, str) ) and isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_A = {"""image""": image, """question""": question}
else:
_A = image
_A = super().__call__(lowerCAmelCase_ , **lowerCAmelCase_ )
return results
    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs
    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 83 | 0 |
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()
class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"
class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
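A usage sketch for the (deprecated) dataset wrapper above, assuming a local copy of the MRPC data; the tokenizer checkpoint and data path are illustrative assumptions:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
data_args = GlueDataTrainingArguments(task_name="mrpc", data_dir="path/to/MRPC", max_seq_length=128)
train_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode=Split.train)
print(len(train_dataset))   # number of cached InputFeatures
print(train_dataset[0])     # a single InputFeatures record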
| 713 | import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
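The generator-to-tf.data pattern used above can be exercised in isolation. A minimal sketch with toy data (the key name "input_ids" mimics a tokenizer's model_input_names):

import tensorflow as tf

def toy_gen():
    # Mimics gen_train: a dict of model inputs plus an integer label.
    yield {"input_ids": [101, 2023, 102]}, 1
    yield {"input_ids": [101, 2003, 102]}, 0

toy_ds = tf.data.Dataset.from_generator(
    toy_gen,
    ({"input_ids": tf.int32}, tf.int64),
    ({"input_ids": tf.TensorShape([None])}, tf.TensorShape([])),
)
for features, label in toy_ds:
    print(features["input_ids"].numpy(), int(label))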
logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: Optional[str] = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
""" --overwrite_output_dir to overcome.""")
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")
            results.update(result)
return results
if __name__ == "__main__":
main()
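The accuracy metric defined in main() can be sanity-checked without a model. A small sketch using the same EvalPrediction container:

import numpy as np
from transformers import EvalPrediction

logits = np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])
labels = np.array([1, 0, 0])
p = EvalPrediction(predictions=logits, label_ids=labels)
preds = np.argmax(p.predictions, axis=1)  # [1, 0, 1]
print({"acc": (preds == p.label_ids).mean()})  # {'acc': 0.666...}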
| 83 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 714 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class Speech2TextConfig(PretrainedConfig):
    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        encoder_layers=12,
        encoder_ffn_dim=2048,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_source_positions=6000,
        max_target_positions=1024,
        num_conv_layers=2,
        conv_kernel_sizes=(5, 5),
        conv_channels=1024,
        input_feat_per_channel=80,
        input_channels=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
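The kernel-size consistency check at the end of __init__ is easy to trigger. A short sketch:

from transformers import Speech2TextConfig

# Consistent: two conv layers, two kernel sizes.
config = Speech2TextConfig(num_conv_layers=2, conv_kernel_sizes=(5, 5))
print(len(config.conv_kernel_sizes) == config.num_conv_layers)  # True

# Inconsistent lengths raise the ValueError shown above.
try:
    Speech2TextConfig(num_conv_layers=3, conv_kernel_sizes=(5, 5))
except ValueError as err:
    print(err)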
| 83 | 0 |
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"""files""" , [
["""full:README.md""", """dataset_infos.json"""],
["""empty:README.md""", """dataset_infos.json"""],
["""dataset_infos.json"""],
["""full:README.md"""],
] , )
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n  dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
"""dataset_info""" , [
DatasetInfo(),
DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""")}) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=42 , ),
] , )
def test_dataset_info_dump_and_reload(tmp_path, dataset_info: DatasetInfo):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded


def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"""dataset_infos_dict""" , [
DatasetInfosDict(),
DatasetInfosDict({"""default""": DatasetInfo()}),
DatasetInfosDict({"""my_config_name""": DatasetInfo()}),
DatasetInfosDict(
{
"""default""": DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""")}) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=42 , )
}),
DatasetInfosDict(
{
"""v1""": DatasetInfo(dataset_size=42),
"""v2""": DatasetInfo(dataset_size=1_337),
}),
] , )
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict: DatasetInfosDict):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)

    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded

    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
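Outside of pytest, the round-trip these tests exercise looks like this. A sketch using the same write_to_directory / from_directory API (the temp directory is arbitrary):

import tempfile
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo

info = DatasetInfo(description="demo", features=Features({"a": Value("int32")}), dataset_size=42)
with tempfile.TemporaryDirectory() as tmp_dir:
    info.write_to_directory(tmp_dir)       # writes dataset_info.json
    reloaded = DatasetInfo.from_directory(tmp_dir)
print(reloaded.dataset_size)  # 42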
| 715 | from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x):
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
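The estimate converges as the step count grows. A quick check against an integral with a known closed form (x^2 on [0, 3], exact area 9; the trapezoid rule overestimates a convex function, so the values approach 9.0 from above):

for steps in (10, 100, 1000):
    approx = trapezoidal_area(lambda x: x * x, 0, 3, steps)
    print(steps, approx)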
| 83 | 0 |
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}
    def setUp(self):
        super().setUp()
        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file, "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
def UpperCAmelCase ( self ) -> str:
pass # TODO add if relevant
def UpperCAmelCase ( self ) -> Optional[Any]:
pass # TODO add if relevant
def UpperCAmelCase ( self ) -> Dict:
pass # TODO add if relevant
    def test_pre_tokenization(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = "こんにちは、世界。 こんばんは、㔺界。"
        expected_tokens = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_tokens)

        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)

        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)
    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        ids = tokenizer.encode(input_text)
        decoded_text = tokenizer.decode(ids)
        self.assertEqual(decoded_text, expected_text)
@slow
def UpperCAmelCase ( self ) -> List[Any]:
_A = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
_A = """こんにちは、世界。"""
_A = """こんばんは、㔺界。😀"""
_A = """こんにちは、世界。こんばんは、世界。😀"""
_A = tokenizer.encode(prefix_text + input_text )
_A = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
_A = tokenizer.encode(lowerCAmelCase_ , prefix_text=lowerCAmelCase_ )
_A = tokenizer.decode(lowerCAmelCase_ )
_A = tokenizer.decode(lowerCAmelCase_ )
_A = tokenizer.decode(lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def UpperCAmelCase ( self ) -> Tuple:
_A = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
_A = """こんにちは、世界。"""
_A = """こんばんは、㔺界。😀"""
_A = len(tokenizer.encode(lowerCAmelCase_ ) ) - 2
_A = len(tokenizer.encode(lowerCAmelCase_ ) ) - 2
_A = [1] + [0] * (len_prefix + len_text + 1)
_A = [1] * (len_prefix + len_text + 1) + [0]
_A = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
_A = tokenizer(prefix_text + input_text ).token_type_ids
_A = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
_A = tokenizer(lowerCAmelCase_ , prefix_text=lowerCAmelCase_ ).token_type_ids
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def UpperCAmelCase ( self ) -> List[Any]:
_A = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
_A = tokenizer.encode("""あンいワ""" )
_A = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
_A = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
self.assertEqual(tokenizer.decode(lowerCAmelCase_ ) , tokenizer.decode(lowerCAmelCase_ ) )
self.assertEqual(tokenizer.decode(lowerCAmelCase_ ) , tokenizer.decode(lowerCAmelCase_ ) )
self.assertNotEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertNotEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def UpperCAmelCase ( self ) -> str:
_A = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
_A = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
_A = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ )
_A = tokenizer.batch_encode_plus(lowerCAmelCase_ , padding=lowerCAmelCase_ )
# fmt: off
_A = [[3_59_93, 86_40, 2_59_48, 3_59_98, 3_06_47, 3_56_75, 3_59_99, 3_59_99], [3_59_93, 1_03_82, 98_68, 3_59_98, 3_06_46, 94_59, 3_06_46, 3_56_75]]
_A = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
_A = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , lowerCAmelCase_ )
self.assertListEqual(x_token.token_type_ids , lowerCAmelCase_ )
self.assertListEqual(x_token.attention_mask , lowerCAmelCase_ )
self.assertListEqual(x_token_a.input_ids , lowerCAmelCase_ )
self.assertListEqual(x_token_a.token_type_ids , lowerCAmelCase_ )
self.assertListEqual(x_token_a.attention_mask , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> List[Any]:
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def UpperCAmelCase ( self ) -> Dict:
# tokenizer has no padding token
pass
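A usage sketch of the prefix-text feature the slow tests above exercise (requires downloading the Tanrei/GPTSAN-japanese checkpoint referenced in the tests):

from transformers import GPTSanJapaneseTokenizer

tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
encoded = tokenizer("いワ", prefix_text="あン")
# token_type_ids mark the prefix segment (1) versus the generated-text segment (0).
print(encoded.input_ids)
print(encoded.token_type_ids)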
| 716 | import numpy as np
import qiskit
def bb84(key_len: int = 8, seed: int | None = None) -> str:
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_state):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)

    bb84_circ.barrier()
    bb84_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key


if __name__ == "__main__":
    print(f"The generated key is : {bb84(8, seed=0)}")

    from doctest import testmod

    testmod()
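The basis-sifting step can be illustrated standalone with numpy. A small sketch of the same comparison, with hand-picked basis arrays:

import numpy as np

alice_basis = np.array([0, 1, 1, 0, 1])
bob_basis = np.array([0, 0, 1, 0, 1])
measured_bits = "10110"
# Only positions where Alice's and Bob's bases agree survive sifting.
sifted = "".join(bit for a, b, bit in zip(alice_basis, bob_basis, measured_bits) if a == b)
print(sifted)  # "1110" -> index 1 is dropped because the bases differ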
| 83 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
'umberto-commoncrawl-cased-v1': (
'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
),
'umberto-wikipedia-uncased-v1': (
'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 717 | import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)


class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file, *args) -> None:
        self._accelerate_config_file = accelerate_config_file

    def run(self):
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."

        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))

        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('0.12.2'):
raise Exception('requires fairseq >= 0.12.2')
if version.parse(fairseq.__version__) > version.parse('2'):
raise Exception('requires fairseq < v2')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def convert_xmod_checkpoint_to_pytorch(
    xmod_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.

    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")

        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.fc1.weight = from_adapter.fc1.weight
            to_adapter.fc1.bias = from_adapter.fc1.bias
            to_adapter.fc2.weight = from_adapter.fc2.weight
            to_adapter.fc2.bias = from_adapter.fc2.bias

        # end of layer

    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xmod_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
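The numeric check at the end of the conversion generalizes to any ported model: copy the weights across, run the same input through both, and compare outputs. A minimal sketch of the pattern with two toy modules:

import torch
import torch.nn as nn

torch.manual_seed(0)
original = nn.Linear(4, 4)
ported = nn.Linear(4, 4)
ported.load_state_dict(original.state_dict())  # the "conversion": copy every weight across

x = torch.randn(1, 4)
max_absolute_diff = torch.max(torch.abs(original(x) - ported(x))).item()
print(f"max_absolute_diff = {max_absolute_diff}")  # 0.0 for an exact copy
assert torch.allclose(original(x), ported(x), atol=1e-3)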
| 718 | import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
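The escape-time logic can be probed at single points before rendering a full image. A quick sketch (c = 0 is inside the Mandelbrot set, c = 1 escapes almost immediately):

print(get_distance(0, 0, 50))  # 1.0 -> never diverges, so the pixel is colored black
print(get_distance(1, 0, 50))  # close to 0 -> diverges within a couple of iterations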
| 83 | 0 |
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class a ( unittest.TestCase ):
"""simple docstring"""
    def test_from_pretrained_downloads_only_flax_weights(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )
            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)
@slow
@require_flax
class a ( unittest.TestCase ):
"""simple docstring"""
    def test_stable_diffusion_flax_tiny(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1

        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
        assert len(images_pil) == num_samples
def UpperCAmelCase ( self ) -> List[Any]:
_A , _A = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""flax""" , safety_checker=lowerCAmelCase_ )
_A = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
_A = jax.random.PRNGKey(0 )
_A = 50
_A = jax.device_count()
_A = num_samples * [prompt]
_A = pipeline.prepare_inputs(lowerCAmelCase_ )
# shard inputs and rng
_A = replicate(lowerCAmelCase_ )
_A = jax.random.split(lowerCAmelCase_ , lowerCAmelCase_ )
_A = shard(lowerCAmelCase_ )
_A = pipeline(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , jit=lowerCAmelCase_ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0565_2401) ) < 1E-3
assert np.abs((np.abs(lowerCAmelCase_ , dtype=np.floataa ).sum() - 2_38_38_08.2) ) < 5E-1
def UpperCAmelCase ( self ) -> Optional[Any]:
_A , _A = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa , safety_checker=lowerCAmelCase_ )
_A = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
_A = jax.random.PRNGKey(0 )
_A = 50
_A = jax.device_count()
_A = num_samples * [prompt]
_A = pipeline.prepare_inputs(lowerCAmelCase_ )
# shard inputs and rng
_A = replicate(lowerCAmelCase_ )
_A = jax.random.split(lowerCAmelCase_ , lowerCAmelCase_ )
_A = shard(lowerCAmelCase_ )
_A = pipeline(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , jit=lowerCAmelCase_ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0400_3906) ) < 1E-3
assert np.abs((np.abs(lowerCAmelCase_ , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1
def UpperCAmelCase ( self ) -> Optional[Any]:
_A , _A = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa )
_A = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
_A = jax.random.PRNGKey(0 )
_A = 50
_A = jax.device_count()
_A = num_samples * [prompt]
_A = pipeline.prepare_inputs(lowerCAmelCase_ )
# shard inputs and rng
_A = replicate(lowerCAmelCase_ )
_A = jax.random.split(lowerCAmelCase_ , lowerCAmelCase_ )
_A = shard(lowerCAmelCase_ )
_A = pipeline(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , jit=lowerCAmelCase_ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0400_3906) ) < 1E-3
assert np.abs((np.abs(lowerCAmelCase_ , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1
def UpperCAmelCase ( self ) -> Union[str, Any]:
_A = FlaxDDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , set_alpha_to_one=lowerCAmelCase_ , steps_offset=1 , )
_A , _A = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa , scheduler=lowerCAmelCase_ , safety_checker=lowerCAmelCase_ , )
_A = scheduler.create_state()
_A = scheduler_state
_A = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
_A = jax.random.PRNGKey(0 )
_A = 50
_A = jax.device_count()
_A = num_samples * [prompt]
_A = pipeline.prepare_inputs(lowerCAmelCase_ )
# shard inputs and rng
_A = replicate(lowerCAmelCase_ )
_A = jax.random.split(lowerCAmelCase_ , lowerCAmelCase_ )
_A = shard(lowerCAmelCase_ )
_A = pipeline(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , jit=lowerCAmelCase_ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4504_3945) ) < 1E-3
assert np.abs((np.abs(lowerCAmelCase_ , dtype=np.floataa ).sum() - 2_34_76_93.5) ) < 5E-1
def UpperCAmelCase ( self ) -> str:
_A = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
_A = jax.device_count()
_A = num_samples * [prompt]
_A = jax.random.split(jax.random.PRNGKey(0 ) , lowerCAmelCase_ )
_A , _A = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa , safety_checker=lowerCAmelCase_ , )
_A = replicate(lowerCAmelCase_ )
_A = pipeline.prepare_inputs(lowerCAmelCase_ )
_A = shard(lowerCAmelCase_ )
_A = pipeline(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , jit=lowerCAmelCase_ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
_A = images[2, 0, 2_56, 10:17, 1]
# With memory efficient attention
_A , _A = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa , safety_checker=lowerCAmelCase_ , use_memory_efficient_attention=lowerCAmelCase_ , )
_A = replicate(lowerCAmelCase_ )
_A = pipeline.prepare_inputs(lowerCAmelCase_ )
_A = shard(lowerCAmelCase_ )
_A = pipeline(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , jit=lowerCAmelCase_ ).images
assert images_eff.shape == (num_samples, 1, 5_12, 5_12, 3)
_A = images[2, 0, 2_56, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
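The replicate/shard pattern used throughout these tests just adds a leading device axis so pmap-style calls can dispatch one slice per device. A shape-only sketch (the batch size of 2 per device is an arbitrary example):

import jax
import numpy as np
from flax.jax_utils import replicate
from flax.training.common_utils import shard

num_devices = jax.device_count()
batch = np.zeros((num_devices * 2, 77), dtype=np.int32)  # per-device batch of 2
sharded = shard(batch)  # splits the leading batch axis across devices
print(sharded.shape)  # (num_devices, 2, 77)

params = {"w": np.zeros((3,))}
replicated = replicate(params)  # copies the same params to every device
print(replicated["w"].shape)  # (num_devices, 3)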
| 719 | import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
_SCREAMING_SNAKE_CASE = datasets.logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n'
_SCREAMING_SNAKE_CASE = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n'
_SCREAMING_SNAKE_CASE = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word references to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one\n are considered singletons. The default evaluation mode will include\n singletons in the evaluation if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n'
def snake_case ( snake_case__ :Optional[Any] , snake_case__ :str , snake_case__ :List[str]=False , snake_case__ :Dict=False , snake_case__ :Any=True , snake_case__ :List[str]=False , snake_case__ :Optional[Any]="dummy_doc") -> List[Any]:
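# Wrap the key/system lines as a single document, extract gold and system mentions,
# optionally restrict them to NPs / minimum spans and drop nested mentions, then
# compute the mention-to-cluster assignments the CoVal evaluators consume.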
_A = {doc: key_lines}
_A = {doc: sys_lines}
_A = {}
_A = 0
_A = 0
_A = 0
_A = 0
_A = 0
_A = 0
_A , _A = reader.get_doc_mentions(snake_case__ , key_doc_lines[doc] , snake_case__)
key_singletons_num += singletons_num
if NP_only or min_span:
_A = reader.set_annotated_parse_trees(snake_case__ , key_doc_lines[doc] , snake_case__ , snake_case__)
_A , _A = reader.get_doc_mentions(snake_case__ , sys_doc_lines[doc] , snake_case__)
sys_singletons_num += singletons_num
if NP_only or min_span:
_A = reader.set_annotated_parse_trees(snake_case__ , key_doc_lines[doc] , snake_case__ , snake_case__)
if remove_nested:
_A , _A = reader.remove_nested_coref_mentions(snake_case__ , snake_case__)
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
_A , _A = reader.remove_nested_coref_mentions(snake_case__ , snake_case__)
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
_A = reader.get_mention_assignments(snake_case__ , snake_case__)
_A = reader.get_mention_assignments(snake_case__ , snake_case__)
_A = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"""Number of removed nested coreferring mentions in the key """
F'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''')
logger.info(
"""Number of resulting singleton clusters in the key """
F'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''')
if not keep_singletons:
logger.info(
F'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '''
"""files, respectively""")
return doc_coref_infos
def snake_case ( snake_case__ :Tuple , snake_case__ :Tuple , snake_case__ :Dict , snake_case__ :Dict , snake_case__ :Tuple , snake_case__ :Dict , snake_case__ :Tuple) -> int:
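# Run each requested CoVal evaluator over the shared coref infos; the CoNLL score is
# the mean F1 of MUC, B-cubed and CEAFe, so it is only reported once all three ran.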
_A = get_coref_infos(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__)
_A = {}
_A = 0
_A = 0
for name, metric in metrics:
_A , _A , _A = evaluator.evaluate_documents(snake_case__ , snake_case__ , beta=1)
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F'''{name}/recall''': recall, F'''{name}/precision''': precision, F'''{name}/f1''': fa})
logger.info(
name.ljust(10) , F'''Recall: {recall * 100:.2f}''' , F''' Precision: {precision * 100:.2f}''' , F''' F1: {fa * 100:.2f}''' , )
if conll_subparts_num == 3:
_A = (conll / 3) * 100
logger.info(F'''CoNLL score: {conll:.2f}''')
output_scores.update({"""conll_score""": conll})
return output_scores
def snake_case ( snake_case__ :Union[str, Any]) -> List[Any]:
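# Column 6 (index 5) of the first non-comment data line holds the parse bit; a bare
# "-" there means the key file carries no gold parse annotation.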
_A = False
for line in key_lines:
if not line.startswith("""#"""):
if len(line.split()) > 6:
_A = line.split()[5]
if parse_col != "-":
_A = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Sequence(datasets.Value("""string""" ) ),
} ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[
"""https://github.com/ns-moosavi/coval""",
"""https://www.aclweb.org/anthology/P16-1060""",
"""http://www.conll.cemantix.org/2012/data.html""",
] , )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False ) -> Union[str, Any]:
_A = [
("""mentions""", evaluator.mentions),
("""muc""", evaluator.muc),
("""bcub""", evaluator.b_cubed),
("""ceafe""", evaluator.ceafe),
("""lea""", evaluator.lea),
]
if min_span:
_A = util.check_gold_parse_annotation(lowerCAmelCase_ )
if not has_gold_parse:
raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
_A = evaluate(
key_lines=lowerCAmelCase_ , sys_lines=lowerCAmelCase_ , metrics=lowerCAmelCase_ , NP_only=lowerCAmelCase_ , remove_nested=lowerCAmelCase_ , keep_singletons=lowerCAmelCase_ , min_span=lowerCAmelCase_ , )
return score
| 83 | 0 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class a ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase :Tuple = StableUnCLIPPipeline
lowerCamelCase :Tuple = TEXT_TO_IMAGE_PARAMS
lowerCamelCase :Any = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCamelCase :Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCamelCase :Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
lowerCamelCase :Dict = False
def UpperCAmelCase ( self ) -> str:
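# Assemble a tiny, seeded two-stage Stable unCLIP graph: a CLIP text prior that
# predicts image embeddings, the image-embedding noising components, and a small
# UNet + VAE denoiser, so the full pipeline can run quickly in tests.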
_A = 32
_A = embedder_hidden_size
# prior components
torch.manual_seed(0 )
_A = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
_A = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCAmelCase_ , projection_dim=lowerCAmelCase_ , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
_A = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=lowerCAmelCase_ , num_layers=1 , )
torch.manual_seed(0 )
_A = DDPMScheduler(
variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=10_00 , clip_sample=lowerCAmelCase_ , clip_sample_range=5.0 , beta_schedule="""squaredcos_cap_v2""" , )
# regular denoising components
torch.manual_seed(0 )
_A = StableUnCLIPImageNormalizer(embedding_dim=lowerCAmelCase_ )
_A = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )
torch.manual_seed(0 )
_A = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
_A = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCAmelCase_ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
_A = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowerCAmelCase_ , layers_per_block=1 , upcast_attention=lowerCAmelCase_ , use_linear_projection=lowerCAmelCase_ , )
torch.manual_seed(0 )
_A = DDIMScheduler(
beta_schedule="""scaled_linear""" , beta_start=0.0_0085 , beta_end=0.012 , prediction_type="""v_prediction""" , set_alpha_to_one=lowerCAmelCase_ , steps_offset=1 , )
torch.manual_seed(0 )
_A = AutoencoderKL()
_A = {
# prior components
"""prior_tokenizer""": prior_tokenizer,
"""prior_text_encoder""": prior_text_encoder,
"""prior""": prior,
"""prior_scheduler""": prior_scheduler,
# image noising components
"""image_normalizer""": image_normalizer,
"""image_noising_scheduler""": image_noising_scheduler,
# regular denoising components
"""tokenizer""": tokenizer,
"""text_encoder""": text_encoder,
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
}
return components
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=0 ) -> int:
if str(lowerCAmelCase_ ).startswith("""mps""" ):
_A = torch.manual_seed(lowerCAmelCase_ )
else:
_A = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
_A = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""prior_num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase ( self ) -> Tuple:
_A = torch_device == """cpu"""
self._test_attention_slicing_forward_pass(test_max_difference=lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
_A = torch_device in ["""cpu""", """mps"""]
self._test_inference_batch_single_identical(test_max_difference=lowerCAmelCase_ )
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self ) -> Optional[Any]:
_A = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy""" )
_A = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_A = torch.Generator(device="""cpu""" ).manual_seed(0 )
_A = pipe("""anime turle""" , generator=lowerCAmelCase_ , output_type="""np""" )
_A = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Union[str, Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_A = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa )
_A = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_A = pipe(
"""anime turtle""" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="""np""" , )
_A = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 720 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
_SCREAMING_SNAKE_CASE = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
_SCREAMING_SNAKE_CASE = {'facebook/blenderbot_small-90M': 512}
def snake_case ( snake_case__ :Tuple) -> str:
_A = set()
_A = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
_A = char
_A = set(snake_case__)
return pairs
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :List[Any] = VOCAB_FILES_NAMES
lowerCamelCase :Tuple = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase :List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase :int = ['''input_ids''', '''attention_mask''']
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="__start__" , lowerCAmelCase_="__end__" , lowerCAmelCase_="__unk__" , lowerCAmelCase_="__null__" , **lowerCAmelCase_ , ) -> int:
super().__init__(unk_token=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , **lowerCAmelCase_ )
with open(lowerCAmelCase_ , encoding="""utf-8""" ) as vocab_handle:
_A = json.load(lowerCAmelCase_ )
_A = {v: k for k, v in self.encoder.items()}
with open(lowerCAmelCase_ , encoding="""utf-8""" ) as merges_handle:
_A = merges_handle.read().split("""\n""" )[1:-1]
_A = [tuple(merge.split() ) for merge in merges]
_A = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
_A = {}
@property
def UpperCAmelCase ( self ) -> int:
return len(self.encoder )
def UpperCAmelCase ( self ) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> str:
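# Byte-pair encoding: normalize punctuation/apostrophe spacing, lowercase each
# whitespace-split token, then greedily apply the lowest-ranked merge until none is
# left; a trailing "@@ " marks subword pieces that do not end a word.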
if token in self.cache:
return self.cache[token]
_A = re.sub("""([.,!?()])""" , r""" \1""" , lowerCAmelCase_ )
_A = re.sub("""(')""" , r""" \1 """ , lowerCAmelCase_ )
_A = re.sub(r"""\s{2,}""" , """ """ , lowerCAmelCase_ )
if "\n" in token:
_A = token.replace("""\n""" , """ __newln__""" )
_A = token.split(""" """ )
_A = []
for token in tokens:
if not len(lowerCAmelCase_ ):
continue
_A = token.lower()
_A = tuple(lowerCAmelCase_ )
_A = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
_A = get_pairs(lowerCAmelCase_ )
if not pairs:
words.append(lowerCAmelCase_ )
continue
while True:
_A = min(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : self.bpe_ranks.get(lowerCAmelCase_ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
_A , _A = bigram
_A = []
_A = 0
while i < len(lowerCAmelCase_ ):
try:
_A = word.index(lowerCAmelCase_ , lowerCAmelCase_ )
new_word.extend(word[i:j] )
_A = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(lowerCAmelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_A = tuple(lowerCAmelCase_ )
_A = new_word
if len(lowerCAmelCase_ ) == 1:
break
else:
_A = get_pairs(lowerCAmelCase_ )
_A = """@@ """.join(lowerCAmelCase_ )
_A = word[:-4]
_A = word
words.append(lowerCAmelCase_ )
return " ".join(lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> List[str]:
_A = []
_A = re.findall(r"""\S+\n?""" , lowerCAmelCase_ )
for token in words:
split_tokens.extend(list(self.bpe(lowerCAmelCase_ ).split(""" """ ) ) )
return split_tokens
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> int:
_A = token.lower()
return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token ) )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> str:
return self.decoder.get(lowerCAmelCase_ , self.unk_token )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> str:
_A = """ """.join(lowerCAmelCase_ ).replace("""@@ """ , """""" ).strip()
return out_string
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_A = os.path.join(
lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_A = os.path.join(
lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase_ , ensure_ascii=lowerCAmelCase_ ) + """\n""" )
_A = 0
with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase_ : lowerCAmelCase_[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
""" Please check that the tokenizer is not corrupted!""" )
_A = token_index
writer.write(""" """.join(lowerCAmelCase_ ) + """\n""" )
index += 1
return vocab_file, merge_file
| 83 | 0 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :torch.FloatTensor
lowerCamelCase :torch.FloatTensor
class a ( __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :Dict = 1
@register_to_config
def __init__( self , lowerCAmelCase_ = 20_00 , lowerCAmelCase_ = 0.15 , lowerCAmelCase_ = 0.01 , lowerCAmelCase_ = 1348.0 , lowerCAmelCase_ = 1E-5 , lowerCAmelCase_ = 1 , ) -> str:
# standard deviation of the initial noise distribution
_A = sigma_max
# setable values
_A = None
self.set_sigmas(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> torch.FloatTensor:
return sample
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None ) -> Optional[Any]:
_A = sampling_eps if sampling_eps is not None else self.config.sampling_eps
_A = torch.linspace(1 , lowerCAmelCase_ , lowerCAmelCase_ , device=lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None ) -> int:
_A = sigma_min if sigma_min is not None else self.config.sigma_min
_A = sigma_max if sigma_max is not None else self.config.sigma_max
_A = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(lowerCAmelCase_ , lowerCAmelCase_ )
_A = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
_A = torch.exp(torch.linspace(math.log(lowerCAmelCase_ ) , math.log(lowerCAmelCase_ ) , lowerCAmelCase_ ) )
_A = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> int:
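# Sigma of the preceding timestep (zero for the first step); step_pred uses it to
# form the per-step diffusion coefficient sqrt(sigma_t**2 - sigma_{t-1}**2).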
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = True , ) -> Union[SdeVeOutput, Tuple]:
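# Predictor step of the SDE-VE predictor-corrector sampler: one reverse-diffusion
# update driven by the score (model_output), plus freshly sampled diffusion noise.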
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
_A = timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
_A = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
_A = timesteps.to(self.discrete_sigmas.device )
_A = self.discrete_sigmas[timesteps].to(sample.device )
_A = self.get_adjacent_sigma(lowerCAmelCase_ , lowerCAmelCase_ ).to(sample.device )
_A = torch.zeros_like(lowerCAmelCase_ )
_A = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
_A = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
_A = diffusion.unsqueeze(-1 )
_A = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
_A = randn_tensor(
sample.shape , layout=sample.layout , generator=lowerCAmelCase_ , device=sample.device , dtype=sample.dtype )
_A = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
_A = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=lowerCAmelCase_ , prev_sample_mean=lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = True , ) -> Union[SchedulerOutput, Tuple]:
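# Corrector step: a Langevin-style update that nudges the sample along the score
# direction, with the step size derived from the configured signal-to-noise ratio.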
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
# For small batch sizes, the paper "suggests replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
_A = randn_tensor(sample.shape , layout=sample.layout , generator=lowerCAmelCase_ ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
_A = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
_A = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
_A = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
_A = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
_A = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
_A = step_size.unsqueeze(-1 )
_A = sample + step_size * model_output
_A = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
_A = timesteps.to(original_samples.device )
_A = self.discrete_sigmas.to(original_samples.device )[timesteps]
_A = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(lowerCAmelCase_ ) * sigmas[:, None, None, None]
)
_A = noise + original_samples
return noisy_samples
def __len__( self ) -> Tuple:
return self.config.num_train_timesteps
| 721 |
# fmt: off
_SCREAMING_SNAKE_CASE = {
'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.', 'G': '--.',
'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..', 'M': '--', 'N': '-.',
'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.', 'S': '...', 'T': '-', 'U': '..-',
'V': '...-', 'W': '.--', 'X': '-..-', 'Y': '-.--', 'Z': '--..', '1': '.----',
'2': '..---', '3': '...--', '4': '....-', '5': '.....', '6': '-....', '7': '--...',
'8': '---..', '9': '----.', '0': '-----', '&': '.-...', '@': '.--.-.',
':': '---...', ',': '--..--', '.': '.-.-.-', '\'': '.----.', '"': '.-..-.',
'?': '..--..', '/': '-..-.', '=': '-...-', '+': '.-.-.', '-': '-....-',
'(': '-.--.', ')': '-.--.-', '!': '-.-.--', ' ': '/'
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
_SCREAMING_SNAKE_CASE = {value: key for key, value in MORSE_CODE_DICT.items()}
def snake_case ( snake_case__ :str) -> str:
return " ".join(MORSE_CODE_DICT[char] for char in message.upper())
def snake_case ( snake_case__ :str) -> str:
return "".join(REVERSE_DICT[char] for char in message.split())
def snake_case ( ) -> None:
_A = """Morse code here!"""
print(snake_case__)
_A = encrypt(snake_case__)
print(snake_case__)
_A = decrypt(snake_case__)
print(snake_case__)
if __name__ == "__main__":
main()
| 83 | 0 |
'''simple docstring'''
_SCREAMING_SNAKE_CASE = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
_SCREAMING_SNAKE_CASE = [{'type': 'code', 'content': INSTALL_CONTENT}]
_SCREAMING_SNAKE_CASE = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 700 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE = {
'configuration_jukebox': [
'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP',
'JukeboxConfig',
'JukeboxPriorConfig',
'JukeboxVQVAEConfig',
],
'tokenization_jukebox': ['JukeboxTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'JukeboxModel',
'JukeboxPreTrainedModel',
'JukeboxVQVAE',
'JukeboxPrior',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 83 | 0 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
_SCREAMING_SNAKE_CASE = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def snake_case ( snake_case__ :Union[str, Any]) -> Dict:
_A = torch.load(snake_case__ , map_location="""cpu""")
return sd
def snake_case ( snake_case__ :List[str] , snake_case__ :Optional[Any] , snake_case__ :int=rename_keys_prefix) -> Optional[Any]:
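# Rewrite every checkpoint key with the (old_prefix, new_prefix) pairs above, skip
# detector weights, and fill the decoder bias from cls.predictions.bias because the
# old BERT code stored it separately.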
_A = OrderedDict()
_A = torch.arange(config.max_position_embeddings).expand((1, -1))
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
_A = key
for name_pair in rename_keys_prefix:
_A = new_key.replace(name_pair[0] , name_pair[1])
_A = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`, but was added separately
_A = new_d["""cls.predictions.bias"""]
return new_d
@torch.no_grad()
def snake_case ( snake_case__ :Tuple , snake_case__ :Tuple) -> int:
assert (
checkpoint_path.split("""/""")[-1] in ACCEPTABLE_CHECKPOINTS
), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
# Get Config
if "pre" in checkpoint_path:
_A = """pretraining"""
if "vcr" in checkpoint_path:
_A = {"""visual_embedding_dim""": 512}
elif "vqa_advanced" in checkpoint_path:
_A = {"""visual_embedding_dim""": 2_048}
elif "vqa" in checkpoint_path:
_A = {"""visual_embedding_dim""": 2_048}
elif "nlvr" in checkpoint_path:
_A = {"""visual_embedding_dim""": 1_024}
else:
raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''')
else:
if "vcr" in checkpoint_path:
_A = {"""visual_embedding_dim""": 512}
_A = """multichoice"""
elif "vqa_advanced" in checkpoint_path:
_A = {"""visual_embedding_dim""": 2_048}
_A = """vqa_advanced"""
elif "vqa" in checkpoint_path:
_A = {"""visual_embedding_dim""": 2_048, """num_labels""": 3_129}
_A = """vqa"""
elif "nlvr" in checkpoint_path:
_A = {
"""visual_embedding_dim""": 1_024,
"""num_labels""": 2,
}
_A = """nlvr"""
_A = VisualBertConfig(**snake_case__)
# Load State Dict
_A = load_state_dict(snake_case__)
_A = get_new_dict(snake_case__ , snake_case__)
if model_type == "pretraining":
_A = VisualBertForPreTraining(snake_case__)
elif model_type == "vqa":
_A = VisualBertForQuestionAnswering(snake_case__)
elif model_type == "nlvr":
_A = VisualBertForVisualReasoning(snake_case__)
elif model_type == "multichoice":
_A = VisualBertForMultipleChoice(snake_case__)
model.load_state_dict(snake_case__)
# Save Checkpoints
Path(snake_case__).mkdir(exist_ok=snake_case__)
model.save_pretrained(snake_case__)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 701 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :Tuple = '''philschmid/bart-large-cnn-samsum'''
lowerCamelCase :Tuple = (
'''This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '''
'''and returns a summary of the text.'''
)
lowerCamelCase :List[Any] = '''summarizer'''
lowerCamelCase :List[str] = AutoTokenizer
lowerCamelCase :Dict = AutoModelForSeqaSeqLM
lowerCamelCase :int = ['''text''']
lowerCamelCase :List[Any] = ['''text''']
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> List[Any]:
return self.pre_processor(lowerCAmelCase_ , return_tensors="""pt""" , truncation=lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Tuple:
return self.model.generate(**lowerCAmelCase_ )[0]
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Union[str, Any]:
return self.pre_processor.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ )
| 83 | 0 |
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :Optional[int] = ['''image_processor''', '''tokenizer''']
lowerCamelCase :str = '''BlipImageProcessor'''
lowerCamelCase :Optional[Any] = '''AutoTokenizer'''
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
super().__init__(lowerCAmelCase_ , lowerCAmelCase_ )
# add QFormer tokenizer
_A = qformer_tokenizer
def __call__( self , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = True , lowerCAmelCase_ = False , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = 0 , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = False , lowerCAmelCase_ = False , lowerCAmelCase_ = False , lowerCAmelCase_ = False , lowerCAmelCase_ = False , lowerCAmelCase_ = True , lowerCAmelCase_ = None , **lowerCAmelCase_ , ) -> BatchFeature:
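# Tokenize the text twice: the main tokenizer output is kept under the usual keys,
# the Q-Former tokenizer output is stored as qformer_input_ids/qformer_attention_mask,
# and pixel values from the image processor are merged in last.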
if images is None and text is None:
raise ValueError("""You have to specify at least images or text.""" )
_A = BatchFeature()
if text is not None:
_A = self.tokenizer(
text=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ , stride=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_overflowing_tokens=lowerCAmelCase_ , return_special_tokens_mask=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_length=lowerCAmelCase_ , verbose=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ , )
encoding.update(lowerCAmelCase_ )
_A = self.qformer_tokenizer(
text=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ , stride=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_overflowing_tokens=lowerCAmelCase_ , return_special_tokens_mask=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_length=lowerCAmelCase_ , verbose=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ , )
_A = qformer_text_encoding.pop("""input_ids""" )
_A = qformer_text_encoding.pop("""attention_mask""" )
if images is not None:
_A = self.image_processor(lowerCAmelCase_ , return_tensors=lowerCAmelCase_ )
encoding.update(lowerCAmelCase_ )
return encoding
def UpperCAmelCase ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Dict:
return self.tokenizer.batch_decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
def UpperCAmelCase ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Any:
return self.tokenizer.decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def UpperCAmelCase ( self ) -> Any:
_A = self.tokenizer.model_input_names
_A = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def UpperCAmelCase ( self , lowerCAmelCase_ , **lowerCAmelCase_ ) -> Dict:
if os.path.isfile(lowerCAmelCase_ ):
raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ )
_A = os.path.join(lowerCAmelCase_ , """qformer_tokenizer""" )
self.qformer_tokenizer.save_pretrained(lowerCAmelCase_ )
return super().save_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
@classmethod
def UpperCAmelCase ( cls , lowerCAmelCase_ , **lowerCAmelCase_ ) -> Dict:
_A = AutoTokenizer.from_pretrained(lowerCAmelCase_ , subfolder="""qformer_tokenizer""" )
_A = cls._get_arguments_from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
args.append(lowerCAmelCase_ )
return cls(*lowerCAmelCase_ )
| 702 |
| 83 | 0 |
from __future__ import annotations
from PIL import Image
# Define glider example
_SCREAMING_SNAKE_CASE = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
_SCREAMING_SNAKE_CASE = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
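# (A blinker oscillates with period two between a vertical and a horizontal bar.)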
def snake_case ( snake_case__ :list[list[int]]) -> list[list[int]]:
_A = []
for i in range(len(snake_case__)):
_A = []
for j in range(len(cells[i])):
# Get the number of live neighbours
_A = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i]) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i]) - 1:
neighbour_count += cells[i][j + 1]
if i < len(snake_case__) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(snake_case__) - 1:
neighbour_count += cells[i + 1][j]
if i < len(snake_case__) - 1 and j < len(cells[i]) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
_A = cells[i][j] == 1
if (alive and 2 <= neighbour_count <= 3) or (
not alive and neighbour_count == 3
):
next_generation_row.append(1)
else:
next_generation_row.append(0)
next_generation.append(snake_case__)
return next_generation
def snake_case ( snake_case__ :list[list[int]] , snake_case__ :int) -> list[Image.Image]:
_A = []
for _ in range(snake_case__):
# Create output image
_A = Image.new("""RGB""" , (len(cells[0]), len(snake_case__)))
_A = img.load()
# Save cells to image
for x in range(len(snake_case__)):
for y in range(len(cells[0])):
_A = 255 - cells[y][x] * 255
_A = (colour, colour, colour)
# Save image
images.append(snake_case__)
_A = new_generation(snake_case__)
return images
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = generate_images(GLIDER, 16)
images[0].save('out.gif', save_all=True, append_images=images[1:])
| 703 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class a ( __lowerCAmelCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[str]:
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def UpperCAmelCase ( self ) -> Optional[int]:
_A = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]}
return Dataset.from_dict(lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
_A = self._create_example_records()
_A = Dataset.from_list(lowerCAmelCase_ )
self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] )
for i, r in enumerate(lowerCAmelCase_ ):
self.assertDictEqual(lowerCAmelCase_ , example_records[i] )
def UpperCAmelCase ( self ) -> str:
_A = self._create_example_records()
_A = Dataset.from_list(lowerCAmelCase_ )
_A = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def UpperCAmelCase ( self ) -> Any: # checks what happens with missing columns
_A = [{"""col_1""": 1}, {"""col_2""": """x"""}]
_A = Dataset.from_list(lowerCAmelCase_ )
self.assertDictEqual(dset[0] , {"""col_1""": 1} )
self.assertDictEqual(dset[1] , {"""col_1""": None} ) # NB: first record is used for columns
def UpperCAmelCase ( self ) -> Tuple: # checks if the type can be inferred from the second record
_A = [{"""col_1""": []}, {"""col_1""": [1, 2]}]
_A = Dataset.from_list(lowerCAmelCase_ )
self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) )
def UpperCAmelCase ( self ) -> Any:
_A = Dataset.from_list([] )
self.assertEqual(len(lowerCAmelCase_ ) , 0 )
self.assertListEqual(dset.column_names , [] )
| 83 | 0 |
def snake_case ( snake_case__ :float) -> float:
return 10 - snake_case__ * snake_case__
def snake_case ( snake_case__ :float , snake_case__ :float) -> float:
# Bolzano's theorem: a continuous function with opposite signs at a and b has a root in (a, b)
if equation(snake_case__) * equation(snake_case__) >= 0:
raise ValueError("""Wrong space!""")
_A = a
while (b - a) >= 0.01:
# Find middle point
_A = (a + b) / 2
# Check if middle point is root
if equation(snake_case__) == 0.0:
break
# Decide the side to repeat the steps
if equation(snake_case__) * equation(snake_case__) < 0:
_A = c
else:
_A = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 704 |
def snake_case ( snake_case__ :int = 1_000_000) -> int:
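    # Totient sieve: collect the primes up to the limit, apply Euler's product formula
    # phi(n) = n * prod_{p | n} (1 - 1/p), and return the integer sum of phi(2..limit).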
_A = set(range(3 , snake_case__ , 2))
primes.add(2)
for p in range(3 , snake_case__ , 2):
if p not in primes:
continue
primes.difference_update(set(range(p * p , snake_case__ , p)))
_A = [float(n) for n in range(snake_case__ + 1)]
for p in primes:
for n in range(p , snake_case__ + 1 , p):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:]))
if __name__ == "__main__":
print(F'''{solution() = }''')
| 83 | 0 |
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class a ( __lowerCAmelCase ):
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = False , lowerCAmelCase_ = False , lowerCAmelCase_ = None , lowerCAmelCase_ = None , **lowerCAmelCase_ , ) -> Optional[Any]:
super().__init__(
lowerCAmelCase_ , split=lowerCAmelCase_ , features=lowerCAmelCase_ , cache_dir=lowerCAmelCase_ , keep_in_memory=lowerCAmelCase_ , streaming=lowerCAmelCase_ , num_proc=lowerCAmelCase_ , **lowerCAmelCase_ , )
_A = field
_A = path_or_paths if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else {self.split: path_or_paths}
_A = Json(
cache_dir=lowerCAmelCase_ , data_files=lowerCAmelCase_ , features=lowerCAmelCase_ , field=lowerCAmelCase_ , **lowerCAmelCase_ , )
def UpperCAmelCase ( self ) -> int:
# Build iterable dataset
if self.streaming:
_A = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
_A = None
_A = None
_A = None
_A = None
self.builder.download_and_prepare(
download_config=lowerCAmelCase_ , download_mode=lowerCAmelCase_ , verification_mode=lowerCAmelCase_ , base_path=lowerCAmelCase_ , num_proc=self.num_proc , )
_A = self.builder.as_dataset(
split=self.split , verification_mode=lowerCAmelCase_ , in_memory=self.keep_in_memory )
return dataset
class a :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , **lowerCAmelCase_ , ) -> List[Any]:
if num_proc is not None and num_proc <= 0:
raise ValueError(F'''num_proc {num_proc} must be an integer > 0.''' )
_A = dataset
_A = path_or_buf
_A = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
_A = num_proc
_A = """utf-8"""
_A = to_json_kwargs
def UpperCAmelCase ( self ) -> int:
_A = self.to_json_kwargs.pop("""path_or_buf""" , lowerCAmelCase_ )
_A = self.to_json_kwargs.pop("""orient""" , """records""" )
_A = self.to_json_kwargs.pop("""lines""" , True if orient == """records""" else False )
_A = self.to_json_kwargs.pop("""index""" , False if orient in ["""split""", """table"""] else True )
_A = self.to_json_kwargs.pop("""compression""" , lowerCAmelCase_ )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(F'''`datasets` currently does not support {compression} compression''' )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , """wb""" , compression=lowerCAmelCase_ ) as buffer:
_A = self._write(file_obj=lowerCAmelCase_ , orient=lowerCAmelCase_ , lines=lowerCAmelCase_ , index=lowerCAmelCase_ , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
F'''The compression parameter is not supported when writing to a buffer, but compression={compression}'''
""" was passed. Please provide a local path instead.""" )
_A = self._write(
file_obj=self.path_or_buf , orient=lowerCAmelCase_ , lines=lowerCAmelCase_ , index=lowerCAmelCase_ , **self.to_json_kwargs )
return written
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> str:
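# Serialize one batch: slice the underlying Arrow table, round-trip it through
# pandas to a JSON string, and guarantee the chunk ends with a newline.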
_A , _A , _A , _A , _A = args
_A = query_table(
table=self.dataset.data , key=slice(lowerCAmelCase_ , offset + self.batch_size ) , indices=self.dataset._indices , )
_A = batch.to_pandas().to_json(
path_or_buf=lowerCAmelCase_ , orient=lowerCAmelCase_ , lines=lowerCAmelCase_ , index=lowerCAmelCase_ , **lowerCAmelCase_ )
if not json_str.endswith("""\n""" ):
json_str += "\n"
return json_str.encode(self.encoding )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ , ) -> int:
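# Stream the dataset to file batch by batch, optionally fanning the per-batch JSON
# serialization out across a multiprocessing pool.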
_A = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
_A = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(lowerCAmelCase_ )
else:
_A , _A = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , lowerCAmelCase_ , lowerCAmelCase_ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
written += file_obj.write(lowerCAmelCase_ )
return written
| 705 |
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class a ( __lowerCAmelCase ):
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=7 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=99 , lowerCAmelCase_=32 , lowerCAmelCase_=5 , lowerCAmelCase_=4 , lowerCAmelCase_=37 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=5_12 , lowerCAmelCase_=16 , lowerCAmelCase_=2 , lowerCAmelCase_=0.02 , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_="None" , lowerCAmelCase_=3 , lowerCAmelCase_=4 , lowerCAmelCase_=None , ) -> Union[str, Any]:
_A = parent
_A = batch_size
_A = seq_length
_A = is_training
_A = use_input_mask
_A = use_token_type_ids
_A = use_labels
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = type_vocab_size
_A = type_sequence_label_size
_A = initializer_range
_A = num_labels
_A = num_choices
_A = relative_attention
_A = position_biased_input
_A = pos_att_type
_A = scope
def UpperCAmelCase ( self ) -> Dict:
_A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A = None
if self.use_input_mask:
_A = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_A = None
if self.use_token_type_ids:
_A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_A = None
_A = None
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_A = ids_tensor([self.batch_size] , self.num_choices )
_A = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase ( self ) -> Optional[int]:
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Any:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]:
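# The base model should yield identically shaped hidden states whether or not the
# attention mask and token type ids are supplied.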
_A = DebertaVaModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0]
_A = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0]
_A = model(lowerCAmelCase_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
_A = DebertaVaForMaskedLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any:
_A = self.num_labels
_A = DebertaVaForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
_A = self.num_labels
_A = DebertaVaForTokenClassification(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[int]:
_A = DebertaVaForQuestionAnswering(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
_A = DebertaVaForMultipleChoice(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase ( self ) -> Optional[int]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
_A = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class a ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase :int = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
lowerCamelCase :str = (
{
'''feature-extraction''': DebertaVaModel,
'''fill-mask''': DebertaVaForMaskedLM,
'''question-answering''': DebertaVaForQuestionAnswering,
'''text-classification''': DebertaVaForSequenceClassification,
'''token-classification''': DebertaVaForTokenClassification,
'''zero-shot''': DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase :str = True
lowerCamelCase :Union[str, Any] = False
lowerCamelCase :Optional[int] = False
lowerCamelCase :List[str] = False
lowerCamelCase :str = False
def UpperCAmelCase ( self ) -> Optional[int]:
_A = DebertaVaModelTester(self )
_A = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 )
def UpperCAmelCase ( self ) -> List[str]:
self.config_tester.run_common_tests()
def UpperCAmelCase ( self ) -> List[str]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Any:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> int:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Optional[int]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*lowerCAmelCase_ )
@slow
def UpperCAmelCase ( self ) -> Any:
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = DebertaVaModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class a ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip(reason="""Model not available yet""" )
def UpperCAmelCase ( self ) -> int:
pass
@slow
def UpperCAmelCase ( self ) -> Optional[Any]:
_A = DebertaVaModel.from_pretrained("""microsoft/deberta-v2-xlarge""" )
_A = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
_A = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0]
# compare the actual values for a slice.
_A = torch.tensor(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase_ , atol=1E-4 ) , F'''{output[:, 1:4, 1:4]}''' )
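# Minimal sketch (the helper lives elsewhere in the test suite) of the
# `ids_tensor` utility the tester above relies on; its semantics here are an
# assumption from the call sites: a tensor of the given shape filled with
# random ids drawn from [0, vocab_size).
import torch

def _ids_tensor_sketch(shape, vocab_size):
    return torch.randint(low=0, high=vocab_size, size=tuple(shape), dtype=torch.long)

assert _ids_tensor_sketch([2, 7], vocab_size=99).shape == (2, 7)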
| 83 | 0 |
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path: str) -> dict:
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]
    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)
    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)
    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # we split the fused QKV projection into separate Q, K, V tensors
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")
            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequenceParallelTransformerBlock` stores the fused QKV weight in K, V, Q order despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)
            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]
    return sd
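# Standalone check of the fused-QKV split above (no checkpoint needed): a
# fused projection of depth 3*d splits into three d-row chunks, read back in
# K, V, Q order per the metaseq comment.
demo_fused = torch.arange(12.0).reshape(6, 2)
demo_k, demo_v, demo_q = torch.split(demo_fused, 6 // 3, dim=0)
assert torch.equal(torch.cat([demo_k, demo_v, demo_q], dim=0), demo_fused)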
@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    state_dict = load_checkpoint(checkpoint_path)
    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()
    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)
    # check results and save
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 706 | def nor_gate(input_1: int, input_2: int) -> int:
    return int(input_1 == input_2 == 0)
def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"| 0 | 0 | {nor_gate(0, 0)} |")
    print(f"| 0 | 1 | {nor_gate(0, 1)} |")
    print(f"| 1 | 0 | {nor_gate(1, 0)} |")
    print(f"| 1 | 1 | {nor_gate(1, 1)} |")
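# NOR is functionally complete; a small demo (not in the original script)
# deriving NOT and OR from nor_gate alone.
def not_gate(a: int) -> int:
    return nor_gate(a, a)

def or_gate(a: int, b: int) -> int:
    return not_gate(nor_gate(a, b))

assert [or_gate(a, b) for a, b in ((0, 0), (0, 1), (1, 0), (1, 1))] == [0, 1, 1, 1]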
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 83 | 0 |
'''simple docstring'''
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
_SCREAMING_SNAKE_CASE = 'bert-base-cased'
_SCREAMING_SNAKE_CASE = 'fp16'
_SCREAMING_SNAKE_CASE = 'bf16'
_SCREAMING_SNAKE_CASE = [FPaa, BFaa]
@require_fsdp
@require_cuda
class a ( __lowerCAmelCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[str]:
super().setUp()
_A = dict(
ACCELERATE_USE_FSDP="""true""" , MASTER_ADDR="""localhost""" , MASTER_PORT="""10999""" , RANK="""0""" , LOCAL_RANK="""0""" , WORLD_SIZE="""1""" , )
def UpperCAmelCase ( self ) -> int:
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
for i, strategy in enumerate(lowerCAmelCase_ ):
_A = self.dist_env.copy()
_A = F'''{i + 1}'''
_A = strategy
with mockenv_context(**lowerCAmelCase_ ):
_A = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) )
def UpperCAmelCase ( self ) -> Optional[Any]:
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
for i, prefetch_policy in enumerate(lowerCAmelCase_ ):
_A = self.dist_env.copy()
_A = prefetch_policy
with mockenv_context(**lowerCAmelCase_ ):
_A = FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )
def UpperCAmelCase ( self ) -> Dict:
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
for i, state_dict_type in enumerate(lowerCAmelCase_ ):
_A = self.dist_env.copy()
_A = state_dict_type
with mockenv_context(**lowerCAmelCase_ ):
_A = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
self.assertTrue(fsdp_plugin.state_dict_config.ranka_only )
def UpperCAmelCase ( self ) -> Union[str, Any]:
_A = AutoModel.from_pretrained(lowerCAmelCase_ )
for policy in FSDP_AUTO_WRAP_POLICY:
_A = self.dist_env.copy()
_A = policy
if policy == "TRANSFORMER_BASED_WRAP":
_A = """BertLayer"""
elif policy == "SIZE_BASED_WRAP":
_A = """2000"""
with mockenv_context(**lowerCAmelCase_ ):
_A = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(lowerCAmelCase_ )
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
_A = self.dist_env.copy()
_A = """TRANSFORMER_BASED_WRAP"""
_A = """T5Layer"""
with mockenv_context(**lowerCAmelCase_ ):
_A = FullyShardedDataParallelPlugin()
with self.assertRaises(lowerCAmelCase_ ) as cm:
fsdp_plugin.set_auto_wrap_policy(lowerCAmelCase_ )
self.assertTrue("""Could not find the transformer layer class to wrap in the model.""" in str(cm.exception ) )
_A = self.dist_env.copy()
_A = """SIZE_BASED_WRAP"""
_A = """0"""
with mockenv_context(**lowerCAmelCase_ ):
_A = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(lowerCAmelCase_ )
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
def UpperCAmelCase ( self ) -> Optional[Any]:
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
_A = self.dist_env.copy()
_A = mp_dtype
with mockenv_context(**lowerCAmelCase_ ):
_A = Accelerator()
if mp_dtype == "fp16":
_A = torch.floataa
elif mp_dtype == "bf16":
_A = torch.bfloataa
_A = MixedPrecision(param_dtype=lowerCAmelCase_ , reduce_dtype=lowerCAmelCase_ , buffer_dtype=lowerCAmelCase_ )
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , lowerCAmelCase_ )
if mp_dtype == FPaa:
self.assertTrue(isinstance(accelerator.scaler , lowerCAmelCase_ ) )
elif mp_dtype == BFaa:
self.assertIsNone(accelerator.scaler )
AcceleratorState._reset_state(lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
_A = self.dist_env.copy()
_A = str(lowerCAmelCase_ ).lower()
with mockenv_context(**lowerCAmelCase_ ):
_A = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=lowerCAmelCase_ ) )
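# Hedged sketch of the env-var -> enum mapping exercised above: torch's
# ShardingStrategy enum is 1-indexed, so strategy i in accelerate's
# FSDP_SHARDING_STRATEGY list maps to ShardingStrategy(i + 1). Only the first
# three names are assumed here; they are stable across recent torch versions.
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy as _SS

for _i, _name in enumerate(["FULL_SHARD", "SHARD_GRAD_OP", "NO_SHARD"]):
    assert _SS(_i + 1).name == _name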
@require_fsdp
@require_multi_gpu
@slow
class a ( __lowerCAmelCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[Any]:
super().setUp()
_A = 0.82
_A = [
"""fsdp_shard_grad_op_transformer_based_wrap""",
"""fsdp_full_shard_transformer_based_wrap""",
]
_A = {
"""multi_gpu_fp16""": 32_00,
"""fsdp_shard_grad_op_transformer_based_wrap_fp16""": 20_00,
"""fsdp_full_shard_transformer_based_wrap_fp16""": 19_00,
# Disabling below test as it overwhelms the RAM memory usage
# on CI self-hosted runner leading to tests getting killed.
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
}
_A = 1_60
_A = 1_60
_A = inspect.getfile(accelerate.test_utils )
_A = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """external_deps"""] )
def UpperCAmelCase ( self ) -> List[str]:
_A = os.path.join(self.test_scripts_folder , """test_performance.py""" )
_A = ["""accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp"""]
for config in self.performance_configs:
_A = cmd.copy()
for i, strategy in enumerate(lowerCAmelCase_ ):
if strategy.lower() in config:
cmd_config.append(F'''--fsdp_sharding_strategy={i+1}''' )
break
if "fp32" in config:
cmd_config.append("""--mixed_precision=no""" )
else:
cmd_config.append("""--mixed_precision=fp16""" )
if "cpu_offload" in config:
cmd_config.append("""--fsdp_offload_params=True""" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(F'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("""--fsdp_min_num_params=2000""" )
cmd_config.extend(
[
self.test_file_path,
F'''--output_dir={self.tmpdir}''',
F'''--performance_lower_bound={self.performance_lower_bound}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowerCAmelCase_ , env=os.environ.copy() )
def UpperCAmelCase ( self ) -> List[str]:
_A = os.path.join(self.test_scripts_folder , """test_checkpointing.py""" )
_A = [
"""accelerate""",
"""launch""",
"""--num_processes=2""",
"""--num_machines=1""",
"""--machine_rank=0""",
"""--use_fsdp""",
"""--mixed_precision=fp16""",
"""--fsdp_transformer_layer_cls_to_wrap=BertLayer""",
]
for i, strategy in enumerate(lowerCAmelCase_ ):
_A = cmd.copy()
cmd_config.append(F'''--fsdp_sharding_strategy={i+1}''' )
if strategy != "FULL_SHARD":
continue
_A = len(lowerCAmelCase_ )
for state_dict_type in FSDP_STATE_DICT_TYPE:
_A = cmd_config[:state_dict_config_index]
cmd_config.append(F'''--fsdp_state_dict_type={state_dict_type}''' )
cmd_config.extend(
[
self.test_file_path,
F'''--output_dir={self.tmpdir}''',
"""--partial_train_epoch=1""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowerCAmelCase_ , env=os.environ.copy() )
_A = cmd_config[:-1]
_A = os.path.join(self.tmpdir , """epoch_0""" )
cmd_config.extend(
[
F'''--resume_from_checkpoint={resume_from_checkpoint}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowerCAmelCase_ , env=os.environ.copy() )
def UpperCAmelCase ( self ) -> Any:
_A = os.path.join(self.test_scripts_folder , """test_peak_memory_usage.py""" )
_A = [
"""accelerate""",
"""launch""",
"""--num_processes=2""",
"""--num_machines=1""",
"""--machine_rank=0""",
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
_A = cmd.copy()
if "fp16" in spec:
cmd_config.extend(["""--mixed_precision=fp16"""] )
else:
cmd_config.extend(["""--mixed_precision=no"""] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(["""--use_fsdp"""] )
for i, strategy in enumerate(lowerCAmelCase_ ):
if strategy.lower() in spec:
cmd_config.append(F'''--fsdp_sharding_strategy={i+1}''' )
break
if "cpu_offload" in spec:
cmd_config.append("""--fsdp_offload_params=True""" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(F'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("""--fsdp_min_num_params=2000""" )
cmd_config.extend(
[
self.test_file_path,
F'''--output_dir={self.tmpdir}''',
F'''--peak_memory_upper_bound={peak_mem_upper_bound}''',
F'''--n_train={self.n_train}''',
F'''--n_val={self.n_val}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowerCAmelCase_ , env=os.environ.copy() )
| 707 | import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class a :
"""simple docstring"""
    def __init__(self, short_edge_length, max_size=sys.maxsize) -> None:
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide a list and randomly choose an index for the resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)
            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # HWC -> NCHW
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)
        return img_augs
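# Worked example of the shortest-edge arithmetic above (pure Python, no image
# needed): a 480x640 image with target shortest edge 600 and max_size 1000
# resizes to 600x800. The helper name is illustrative, not part of this file.
def _shortest_edge_hw(h, w, size, max_size):
    scale = size * 1.0 / min(h, w)
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    if max(newh, neww) > max_size:
        rescale = max_size * 1.0 / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    return int(newh + 0.5), int(neww + 0.5)

assert _shortest_edge_hw(480, 640, 600, 1000) == (600, 800)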
class a :
"""simple docstring"""
    def __init__(self, cfg) -> None:
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images) -> Tuple:
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im, [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]], value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]
        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize the smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before padding to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them for the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes

def _clip_box(tensor, box_size: Tuple[int, int]):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
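# Quick demo of the two helpers above: scale boxes back to original-image
# coordinates, then clip them into an (h, w) = (50, 100) canvas.
demo_boxes = torch.tensor([[10.0, 20.0, 210.0, 120.0]])
_scale_box(demo_boxes, torch.tensor([[0.5, 0.5]]))
_clip_box(demo_boxes, (50, 100))
assert demo_boxes.tolist() == [[5.0, 10.0, 100.0, 50.0]]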
| 83 | 0 |
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
_SCREAMING_SNAKE_CASE = '0.12' # assumed parallelism: 8
@require_flax
@is_staging_test
class a ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def UpperCAmelCase ( cls ) -> int:
_A = TOKEN
HfFolder.save_token(lowerCAmelCase_ )
@classmethod
def UpperCAmelCase ( cls ) -> Optional[int]:
try:
delete_repo(token=cls._token , repo_id="""test-model-flax""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-model-flax-org""" )
except HTTPError:
pass
def UpperCAmelCase ( self ) -> Any:
_A = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
_A = FlaxBertModel(lowerCAmelCase_ )
model.push_to_hub("""test-model-flax""" , use_auth_token=self._token )
_A = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' )
_A = flatten_dict(unfreeze(model.params ) )
_A = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
_A = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCAmelCase_ , 1E-3 , msg=F'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id="""test-model-flax""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowerCAmelCase_ , repo_id="""test-model-flax""" , push_to_hub=lowerCAmelCase_ , use_auth_token=self._token )
_A = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' )
_A = flatten_dict(unfreeze(model.params ) )
_A = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
_A = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCAmelCase_ , 1E-3 , msg=F'''{key} not identical''' )
def UpperCAmelCase ( self ) -> List[str]:
_A = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
_A = FlaxBertModel(lowerCAmelCase_ )
model.push_to_hub("""valid_org/test-model-flax-org""" , use_auth_token=self._token )
_A = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""" )
_A = flatten_dict(unfreeze(model.params ) )
_A = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
_A = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCAmelCase_ , 1E-3 , msg=F'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-model-flax-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
lowerCAmelCase_ , repo_id="""valid_org/test-model-flax-org""" , push_to_hub=lowerCAmelCase_ , use_auth_token=self._token )
_A = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""" )
_A = flatten_dict(unfreeze(model.params ) )
_A = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
_A = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCAmelCase_ , 1E-3 , msg=F'''{key} not identical''' )
def check_models_equal(model_1, model_2) -> bool:
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1E-4:
            models_are_equal = False
    return models_are_equal
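# Sanity check of the tolerance logic above with plain dicts (no Flax model
# needed); names are illustrative.
demo_a = {("dense", "kernel"): np.ones((2, 2))}
demo_b = {("dense", "kernel"): np.ones((2, 2)) + 1e-6}
assert all(np.sum(np.abs(demo_a[k] - demo_b[k])) <= 1e-4 for k in demo_a)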
@require_flax
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> int:
_A = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
_A = FlaxBertModel(lowerCAmelCase_ )
_A = """bert"""
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) )
with self.assertRaises(lowerCAmelCase_ ):
_A = FlaxBertModel.from_pretrained(lowerCAmelCase_ )
_A = FlaxBertModel.from_pretrained(lowerCAmelCase_ , subfolder=lowerCAmelCase_ )
self.assertTrue(check_models_equal(lowerCAmelCase_ , lowerCAmelCase_ ) )
def UpperCAmelCase ( self ) -> Any:
_A = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
_A = FlaxBertModel(lowerCAmelCase_ )
_A = """bert"""
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) , max_shard_size="""10KB""" )
with self.assertRaises(lowerCAmelCase_ ):
_A = FlaxBertModel.from_pretrained(lowerCAmelCase_ )
_A = FlaxBertModel.from_pretrained(lowerCAmelCase_ , subfolder=lowerCAmelCase_ )
self.assertTrue(check_models_equal(lowerCAmelCase_ , lowerCAmelCase_ ) )
def UpperCAmelCase ( self ) -> int:
_A = """bert"""
_A = """hf-internal-testing/tiny-random-bert-subfolder"""
with self.assertRaises(lowerCAmelCase_ ):
_A = FlaxBertModel.from_pretrained(lowerCAmelCase_ )
_A = FlaxBertModel.from_pretrained(lowerCAmelCase_ , subfolder=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
_A = """bert"""
_A = """hf-internal-testing/tiny-random-bert-sharded-subfolder"""
with self.assertRaises(lowerCAmelCase_ ):
_A = FlaxBertModel.from_pretrained(lowerCAmelCase_ )
_A = FlaxBertModel.from_pretrained(lowerCAmelCase_ , subfolder=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
| 708 | from collections import defaultdict
def dfs(start: int) -> int:
    # returns the size of the subtree rooted at `start` and records nodes
    # whose subtree has an even number of vertices
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret
def even_tree() -> None:
    dfs(1)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 10, 9
_SCREAMING_SNAKE_CASE = defaultdict(list)
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
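# Sanity check of the output above: the even-sized subtrees are {3, 4}
# (size 2) and {6, 8, 9, 10} (size 4); the root's whole tree (size 10) is
# recorded as well, so `cuts` holds three nodes and len(cuts) - 1 == 2 edges
# can be removed while keeping every resulting component even.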
| 83 | 0 |
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear',
'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed',
'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
_SCREAMING_SNAKE_CASE = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoint
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()
    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()
    hf_wavlm = WavLMModel(config)
    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
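# Mini demo of the "*" substitution performed in recursively_load_weights
# above: the layer index parsed out of the fairseq name replaces the template
# wildcard in the HF key.
demo_name = "encoder.layers.3.fc1.weight"
demo_template = "encoder.layers.*.feed_forward.intermediate_dense"
demo_layer = demo_name.split("fc1")[0].split(".")[-2]
assert demo_template.replace("*", demo_layer) == "encoder.layers.3.feed_forward.intermediate_dense"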
| 709 | import heapq
def greedy_min_vertex_cover(graph: dict) -> set[int]:
    queue = []
    # for each node and its adjacency list, add them and the rank of the node to the queue;
    # using the heapq module, the queue is filled like a priority queue
    # (heapq is a min-heap, so -1 * len(v) turns it into a max-heap by degree)
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while the queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract the vertex with max rank from the queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)
        # remove all arcs adjacent to argmax
        for elem in queue:
            # if the node has no adjacent nodes, skip it
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
_SCREAMING_SNAKE_CASE = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
| 83 | 0 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :Union[str, Any] = ['''input_features''', '''is_longer''']
def __init__( self , lowerCAmelCase_=64 , lowerCAmelCase_=4_80_00 , lowerCAmelCase_=4_80 , lowerCAmelCase_=10 , lowerCAmelCase_=10_24 , lowerCAmelCase_=0.0 , lowerCAmelCase_=False , lowerCAmelCase_ = 0 , lowerCAmelCase_ = 1_40_00 , lowerCAmelCase_ = None , lowerCAmelCase_ = "fusion" , lowerCAmelCase_ = "repeatpad" , **lowerCAmelCase_ , ) -> Any:
super().__init__(
feature_size=lowerCAmelCase_ , sampling_rate=lowerCAmelCase_ , padding_value=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , **lowerCAmelCase_ , )
_A = top_db
_A = truncation
_A = padding
_A = fft_window_size
_A = (fft_window_size >> 1) + 1
_A = hop_length
_A = max_length_s
_A = max_length_s * sampling_rate
_A = sampling_rate
_A = frequency_min
_A = frequency_max
_A = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=lowerCAmelCase_ , min_frequency=lowerCAmelCase_ , max_frequency=lowerCAmelCase_ , sampling_rate=lowerCAmelCase_ , norm=lowerCAmelCase_ , mel_scale="""htk""" , )
_A = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=lowerCAmelCase_ , min_frequency=lowerCAmelCase_ , max_frequency=lowerCAmelCase_ , sampling_rate=lowerCAmelCase_ , norm="""slaney""" , mel_scale="""slaney""" , )
def UpperCAmelCase ( self ) -> Dict[str, Any]:
_A = copy.deepcopy(self.__dict__ )
_A = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> np.ndarray:
_A = spectrogram(
lowerCAmelCase_ , window_function(self.fft_window_size , """hann""" ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=lowerCAmelCase_ , log_mel="""dB""" , )
return log_mel_spectrogram.T
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
_A = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
_A = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
_A = [0]
# randomly choose index for each part
_A = np.random.choice(ranges[0] )
_A = np.random.choice(ranges[1] )
_A = np.random.choice(ranges[2] )
_A = mel[idx_front : idx_front + chunk_frames, :]
_A = mel[idx_middle : idx_middle + chunk_frames, :]
_A = mel[idx_back : idx_back + chunk_frames, :]
_A = torch.tensor(mel[None, None, :] )
_A = torch.nn.functional.interpolate(
lowerCAmelCase_ , size=[chunk_frames, 64] , mode="""bilinear""" , align_corners=lowerCAmelCase_ )
_A = mel_shrink[0][0].numpy()
_A = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> np.array:
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
_A = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
_A = len(lowerCAmelCase_ ) - max_length
_A = np.random.randint(0 , overflow + 1 )
_A = waveform[idx : idx + max_length]
_A = self._np_extract_fbank_features(lowerCAmelCase_ , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
_A = self._np_extract_fbank_features(lowerCAmelCase_ , self.mel_filters )
_A = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
_A = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
_A = np.stack([mel, mel, mel, mel] , axis=0 )
_A = False
else:
_A = self._random_mel_fusion(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_A = True
else:
raise NotImplementedError(F'''data_truncating {truncation} not implemented''' )
else:
_A = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
_A = int(max_length / len(lowerCAmelCase_ ) )
_A = np.stack(np.tile(lowerCAmelCase_ , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
_A = int(max_length / len(lowerCAmelCase_ ) )
_A = np.stack(np.tile(lowerCAmelCase_ , lowerCAmelCase_ ) )
_A = np.pad(lowerCAmelCase_ , (0, max_length - waveform.shape[0]) , mode="""constant""" , constant_values=0 )
if truncation == "fusion":
_A = self._np_extract_fbank_features(lowerCAmelCase_ , self.mel_filters )
_A = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
_A = self._np_extract_fbank_features(lowerCAmelCase_ , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , **lowerCAmelCase_ , ) -> BatchFeature:
_A = truncation if truncation is not None else self.truncation
_A = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
_A = isinstance(lowerCAmelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
_A = is_batched_numpy or (
isinstance(lowerCAmelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_A = [np.asarray(lowerCAmelCase_ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCAmelCase_ , np.ndarray ):
_A = np.asarray(lowerCAmelCase_ , dtype=np.floataa )
elif isinstance(lowerCAmelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_A = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_A = [np.asarray(lowerCAmelCase_ )]
# convert to mel spectrogram, truncate and pad if needed.
_A = [
self._get_input_mel(lowerCAmelCase_ , max_length if max_length else self.nb_max_samples , lowerCAmelCase_ , lowerCAmelCase_ )
for waveform in raw_speech
]
_A = []
_A = []
for mel, longer in padded_inputs:
input_mel.append(lowerCAmelCase_ )
is_longer.append(lowerCAmelCase_ )
if truncation == "fusion" and sum(lowerCAmelCase_ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
_A = np.random.randint(0 , len(lowerCAmelCase_ ) )
_A = True
if isinstance(input_mel[0] , lowerCAmelCase_ ):
_A = [np.asarray(lowerCAmelCase_ , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
_A = [[longer] for longer in is_longer]
_A = {"""input_features""": input_mel, """is_longer""": is_longer}
_A = BatchFeature(lowerCAmelCase_ )
if return_tensors is not None:
_A = input_features.convert_to_tensors(lowerCAmelCase_ )
return input_features
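# Standalone sketch of the "repeatpad" branch above: tile the waveform to just
# under max_length, then zero-pad the remainder (np.tile is equivalent to the
# np.stack(np.tile(...)) spelling used in the method for a 1-D waveform).
demo_wave = np.array([1.0, 2.0, 3.0])
demo_max = 8
demo_repeat = int(demo_max / len(demo_wave))  # 2
demo_tiled = np.tile(demo_wave, demo_repeat)  # length 6
demo_padded = np.pad(demo_tiled, (0, demo_max - demo_tiled.shape[0]), mode="constant", constant_values=0)
assert demo_padded.tolist() == [1, 2, 3, 1, 2, 3, 0, 0]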
| 710 | import math
import unittest
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False
    # all primes > 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[Any]:
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def UpperCAmelCase ( self ) -> Dict:
with self.assertRaises(lowerCAmelCase_ ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , """Zero doesn't have any positive factors, primes must have exactly two.""" , )
self.assertFalse(
is_prime(1 ) , """One only has 1 positive factor, primes must have exactly two.""" , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 83 | 0 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class a :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[int]:
_A = question_encoder
_A = generator
_A = self.question_encoder
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Any:
if os.path.isfile(lowerCAmelCase_ ):
raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ )
_A = os.path.join(lowerCAmelCase_ , """question_encoder_tokenizer""" )
_A = os.path.join(lowerCAmelCase_ , """generator_tokenizer""" )
self.question_encoder.save_pretrained(lowerCAmelCase_ )
self.generator.save_pretrained(lowerCAmelCase_ )
@classmethod
def UpperCAmelCase ( cls , lowerCAmelCase_ , **lowerCAmelCase_ ) -> Tuple:
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
_A = kwargs.pop("""config""" , lowerCAmelCase_ )
if config is None:
_A = RagConfig.from_pretrained(lowerCAmelCase_ )
_A = AutoTokenizer.from_pretrained(
lowerCAmelCase_ , config=config.question_encoder , subfolder="""question_encoder_tokenizer""" )
_A = AutoTokenizer.from_pretrained(
lowerCAmelCase_ , config=config.generator , subfolder="""generator_tokenizer""" )
return cls(question_encoder=lowerCAmelCase_ , generator=lowerCAmelCase_ )
def __call__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Tuple:
return self.current_tokenizer(*lowerCAmelCase_ , **lowerCAmelCase_ )
def UpperCAmelCase ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Optional[int]:
return self.generator.batch_decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
def UpperCAmelCase ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[Any]:
return self.generator.decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Tuple:
_A = self.question_encoder
def UpperCAmelCase ( self ) -> Any:
_A = self.generator
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = "longest" , lowerCAmelCase_ = None , lowerCAmelCase_ = True , **lowerCAmelCase_ , ) -> BatchEncoding:
warnings.warn(
"""`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """
"""regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """
"""context manager to prepare your targets. See the documentation of your specific tokenizer for more """
"""details""" , lowerCAmelCase_ , )
if max_length is None:
_A = self.current_tokenizer.model_max_length
_A = self(
lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , **lowerCAmelCase_ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
_A = self.current_tokenizer.model_max_length
_A = self(
text_target=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , padding=lowerCAmelCase_ , max_length=lowerCAmelCase_ , truncation=lowerCAmelCase_ , **lowerCAmelCase_ , )
_A = labels["""input_ids"""]
return model_inputs
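# Minimal sketch (illustrative class, not transformers code) of the delegation
# pattern above: the wrapper holds two tokenizers and forwards __call__ to
# whichever one is currently selected.
class _PairTokenizerSketch:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = question_encoder

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

_pair = _PairTokenizerSketch(str.upper, str.lower)
assert _pair("Hello") == "HELLO"
_pair.current_tokenizer = _pair.generator
assert _pair("Hello") == "hello"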
| 711 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE = {'configuration_encoder_decoder': ['EncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['EncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['TFEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['FlaxEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
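# Pure-Python illustration (not transformers' actual _LazyModule) of what the
# lazy pattern above buys: the submodule import only happens when one of its
# exported names is first accessed.
import importlib
import types

class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for module_name, names in self._import_structure.items():
            if attr in names:
                return getattr(importlib.import_module(module_name), attr)
        raise AttributeError(attr)

_lazy = _LazyModuleSketch("demo", {"math": ["sqrt"]})
assert _lazy.sqrt(9) == 3.0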
| 83 | 0 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
_SCREAMING_SNAKE_CASE = 'pytorch_model.bin'
_SCREAMING_SNAKE_CASE = 'pytorch_model.bin.index.json'
_SCREAMING_SNAKE_CASE = 'adapter_config.json'
_SCREAMING_SNAKE_CASE = 'adapter_model.bin'
_SCREAMING_SNAKE_CASE = 'adapter_model.safetensors'
_SCREAMING_SNAKE_CASE = 'tf_model.h5'
_SCREAMING_SNAKE_CASE = 'tf_model.h5.index.json'
_SCREAMING_SNAKE_CASE = 'model.ckpt'
_SCREAMING_SNAKE_CASE = 'flax_model.msgpack'
_SCREAMING_SNAKE_CASE = 'flax_model.msgpack.index.json'
_SCREAMING_SNAKE_CASE = 'model.safetensors'
_SCREAMING_SNAKE_CASE = 'model.safetensors.index.json'
_SCREAMING_SNAKE_CASE = 'config.json'
_SCREAMING_SNAKE_CASE = 'preprocessor_config.json'
_SCREAMING_SNAKE_CASE = FEATURE_EXTRACTOR_NAME
_SCREAMING_SNAKE_CASE = 'generation_config.json'
_SCREAMING_SNAKE_CASE = 'modelcard.json'
_SCREAMING_SNAKE_CASE = '▁'
_SCREAMING_SNAKE_CASE = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
_SCREAMING_SNAKE_CASE = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
_SCREAMING_SNAKE_CASE = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
_SCREAMING_SNAKE_CASE = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version: str) -> None:
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers.")
| 712 | from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
@add_end_docstrings(__lowerCAmelCase )
class a ( __lowerCAmelCase ):
"""simple docstring"""
def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Optional[Any]:
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
self.check_model_type(lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ ) -> Tuple:
_A , _A = {}, {}
if padding is not None:
_A = padding
if truncation is not None:
_A = truncation
if top_k is not None:
_A = top_k
return preprocess_params, {}, postprocess_params
    def __call__( self , image , question = None , **kwargs ) -> Union[str, Any]:
        if isinstance(image , (Image.Image, str) ) and isinstance(question , str ):
            inputs = {"image": image, "question": question}
        else:
            # inputs are already packed as {"image": ..., "question": ...} dicts
            inputs = image
        results = super().__call__(inputs , **kwargs )
        return results
    def preprocess( self , inputs , padding=False , truncation=False ) -> Any:
        image = load_image(inputs["image"] )
        model_inputs = self.tokenizer(
            inputs["question"] , return_tensors=self.framework , padding=padding , truncation=truncation )
        image_features = self.image_processor(images=image , return_tensors=self.framework )
        model_inputs.update(image_features )
        return model_inputs
    def _forward( self , model_inputs ) -> Tuple:
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs , top_k=5 ) -> Union[str, Any]:
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k )
        else:
            raise ValueError(F'''Unsupported framework: {self.framework}''' )
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
| 83 | 0 |
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class a ( ModelMixin , ConfigMixin , ModuleUtilsMixin ):
"""simple docstring"""
@register_to_config
    def __init__( self , max_length , vocab_size , d_model , dropout_rate , num_layers , num_heads , d_kv , d_ff , feed_forward_proj , is_decoder = False , ) -> None:
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size , d_model )
        self.position_encoding = nn.Embedding(max_length , d_model )
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate )
        t5config = TaConfig(
            vocab_size=vocab_size , d_model=d_model , num_heads=num_heads , d_kv=d_kv , d_ff=d_ff , dropout_rate=dropout_rate , feed_forward_proj=feed_forward_proj , is_decoder=is_decoder , is_encoder_decoder=False , )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers ):
            lyr = TaBlock(t5config )
            self.encoders.append(lyr )
        self.layer_norm = TaLayerNorm(d_model )
        self.dropout_post = nn.Dropout(p=dropout_rate )
    def forward( self , encoder_input_tokens , encoder_inputs_mask ) -> Tuple:
        x = self.token_embedder(encoder_input_tokens )
        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length , device=encoder_input_tokens.device )
        x += self.position_encoding(inputs_positions )
        x = self.dropout_pre(x )
        # invert the attention mask (1 = keep) into an additive bias
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask , input_shape )
        for lyr in self.encoders:
            x = lyr(x , extended_attention_mask )[0]
        x = self.layer_norm(x )
        return self.dropout_post(x ), encoder_inputs_mask
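# Minimal standalone sketch of what get_extended_attention_mask (ModuleUtilsMixin)
# produces: a (batch, seq_len) keep-mask turned into an additive attention bias.
# mask = torch.tensor([[1, 1, 1, 0]])
# ext = (1.0 - mask[:, None, None, :].to(torch.float32)) * torch.finfo(torch.float32).min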
| 713 | import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds( train_file :str , eval_file :str , test_file :str , tokenizer :PreTrainedTokenizer , label_column_id :int , max_seq_length :Optional[int] = None , ):
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]
    ds = datasets.load_dataset("csv" , data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    labelaid = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}
    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]] , truncation=True , max_length=max_seq_length , padding="max_length") , batched=True , )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]) , truncation=True , max_length=max_seq_length , padding="max_length" , ) , batched=True , )
    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = labelaid[ex[label_name]]
            yield (d, label)
    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = labelaid[ex[label_name]]
            yield (d, label)
    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = labelaid[ex[label_name]]
            yield (d, label)
    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])) , )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))
    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])) , )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))
    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])) , )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))
return train_ds, val_ds, test_ds, labelaid
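# Hypothetical call (CSV paths and the label column index are placeholders):
# train_ds, val_ds, test_ds, labelaid = get_tfds(
#     "train.csv", "dev.csv", "test.csv", tokenizer, label_column_id=0, max_seq_length=128)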
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """simple docstring"""
    label_column_id: int = field(metadata={"help": "Which column contains the label"} )
    train_file: Optional[str] = field(default=None , metadata={"help": "The path of the training file"} )
    dev_file: Optional[str] = field(default=None , metadata={"help": "The path of the development file"} )
    test_file: Optional[str] = field(default=None , metadata={"help": "The path of the test file"} )
    max_seq_length: int = field(
        default=128 , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"help": "Overwrite the cached training and evaluation sets"} )
@dataclass
class ModelArguments:
    """simple docstring"""
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    use_fast: bool = field(default=False , metadata={"help": "Set this flag to use fast tokenization."} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
def main() -> Dict:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
""" --overwrite_output_dir to overcome.""")
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.info(
F'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, '''
F'''16-bits training: {training_args.fpaa}''')
logger.info(F'''Training/evaluation parameters {training_args}''')
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    train_dataset, eval_dataset, test_ds, labelaid = get_tfds(
        train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=tokenizer , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(labelaid) , label2id=labelaid , id2label={id: label for label, id in labelaid.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , )
    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path) , config=config , cache_dir=model_args.cache_dir , )
    def compute_metrics(p :EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions , axis=1)
        return {"acc": (preds == p.label_ids).mean()}
    # Initialize our Trainer
    trainer = TFTrainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , "eval_results.txt")
        with open(output_eval_file , "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(F''' {key} = {value}''')
                writer.write(F'''{key} = {value}\n''')
            results.update(result)
    return results
if __name__ == "__main__":
main()
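# Invocation sketch (script name and file paths are placeholders):
#   python this_script.py --model_name_or_path bert-base-uncased \
#     --train_file train.csv --dev_file dev.csv --label_column_id 0 \
#     --output_dir ./out --do_train --do_eval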
| 83 | 0 |
MORSE_CODE_DICT = {
'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.', 'G': '--.',
'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..', 'M': '--', 'N': '-.',
'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.', 'S': '...', 'T': '-', 'U': '..-',
'V': '...-', 'W': '.--', 'X': '-..-', 'Y': '-.--', 'Z': '--..', '1': '.----',
'2': '..---', '3': '...--', '4': '....-', '5': '.....', '6': '-....', '7': '--...',
'8': '---..', '9': '----.', '0': '-----', '&': '.-...', '@': '.--.-.',
':': '---...', ',': '--..--', '.': '.-.-.-', '\'': '.----.', '"': '.-..-.',
'?': '..--..', '/': '-..-.', '=': '-...-', '+': '.-.-.', '-': '-....-',
'(': '-.--.', ')': '-.--.-', '!': '-.-.--', ' ': '/'
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt( message :str) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())
def decrypt( message :str) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split())
def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
if __name__ == "__main__":
main()
| 714 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class a ( PretrainedConfig ):
"""simple docstring"""
    model_type = '''speech_to_text'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
    def __init__( self , vocab_size=1_00_00 , encoder_layers=12 , encoder_ffn_dim=20_48 , encoder_attention_heads=4 , decoder_layers=6 , decoder_ffn_dim=20_48 , decoder_attention_heads=4 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="relu" , d_model=2_56 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=2 , scale_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , max_source_positions=60_00 , max_target_positions=10_24 , num_conv_layers=2 , conv_kernel_sizes=(5, 5) , conv_channels=10_24 , input_feat_per_channel=80 , input_channels=1 , **kwargs , ) -> None:
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes )
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` """
F'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, '''
F'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , **kwargs , )
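# Instantiation sketch (the upstream equivalent is transformers.Speech2TextConfig):
# cfg = a(vocab_size=10_000, encoder_layers=6)
# assert len(cfg.conv_kernel_sizes) == cfg.num_conv_layers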
| 83 | 0 |
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class a ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False
    def setUp( self ) -> None:
        super().setUp()
        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def canine_tokenizer( self ) -> CanineTokenizer:
        return CanineTokenizer.from_pretrained("google/canine-s" )
    def get_tokenizer( self , **kwargs ) -> CanineTokenizer:
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
        tokenizer._unicode_vocab_size = 10_24  # NOTE: attribute name reconstructed; the garbled source kept only the value
        return tokenizer
@require_torch
    def UpperCAmelCase ( self ) -> int:
        tokenizer = self.canine_tokenizer
        src_text = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
        # fmt: off
        expected_src_tokens = [5_73_44, 76, 1_05, 1_02, 1_01, 32, 1_05, 1_15, 32, 1_08, 1_05, 1_07, 1_01, 32, 97, 32, 98, 1_11, 1_20, 32, 1_11, 1_02, 32, 99, 1_04, 1_11, 99, 1_11, 1_08, 97, 1_16, 1_01, 1_15, 46, 5_73_45, 0, 0, 0, 0]
        # fmt: on
        batch = tokenizer(src_text , padding=True , return_tensors="pt" )
        self.assertIsInstance(batch , BatchEncoding )
        result = list(batch.input_ids.numpy()[0] )
        self.assertListEqual(expected_src_tokens , result )
        self.assertEqual((2, 39) , batch.input_ids.shape )
        self.assertEqual((2, 39) , batch.attention_mask.shape )
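    # Why 57344/57345 appear above: CANINE maps special tokens to Unicode
    # private-use codepoints (0xE000 = 57344 for [CLS], 0xE001 = 57345 for [SEP]);
    # ordinary characters are simply their codepoints, e.g. ord("L") == 76.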
@require_torch
def UpperCAmelCase ( self ) -> Optional[int]:
_A = self.canine_tokenizer
_A = ["""Once there was a man.""", """He wrote a test in HuggingFace Tranformers."""]
_A = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors="""pt""" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("""input_ids""" , lowerCAmelCase_ )
self.assertIn("""attention_mask""" , lowerCAmelCase_ )
self.assertIn("""token_type_ids""" , lowerCAmelCase_ )
@require_torch
def UpperCAmelCase ( self ) -> Any:
_A = self.canine_tokenizer
_A = [
"""What's the weater?""",
"""It's about 25 degrees.""",
]
_A = tokenizer(
text_target=lowerCAmelCase_ , max_length=32 , padding="""max_length""" , truncation=lowerCAmelCase_ , return_tensors="""pt""" )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
def UpperCAmelCase ( self ) -> int:
# safety check on max_len default value so we are sure the test works
_A = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
_A = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_A = tempfile.mkdtemp()
_A = """ He is very happy, UNwant\u00E9d,running"""
_A = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
tokenizer.save_pretrained(lowerCAmelCase_ )
_A = tokenizer.__class__.from_pretrained(lowerCAmelCase_ )
_A = after_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
shutil.rmtree(lowerCAmelCase_ )
_A = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_A = tempfile.mkdtemp()
_A = """ He is very happy, UNwant\u00E9d,running"""
_A = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
_A = chr(0xe_007 )
additional_special_tokens.append(lowerCAmelCase_ )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
_A = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
tokenizer.save_pretrained(lowerCAmelCase_ )
_A = tokenizer.__class__.from_pretrained(lowerCAmelCase_ )
_A = after_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertIn(lowerCAmelCase_ , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
_A = tokenizer.__class__.from_pretrained(lowerCAmelCase_ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> List[str]:
_A = self.get_tokenizers(do_lower_case=lowerCAmelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_A , _A = self.get_clean_sequence(lowerCAmelCase_ )
# a special token for Canine can be defined as follows:
_A = 0xe_005
_A = chr(lowerCAmelCase_ )
tokenizer.add_special_tokens({"""cls_token""": special_token} )
_A = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertEqual(len(lowerCAmelCase_ ) , 1 )
_A = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=lowerCAmelCase_ )
_A = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_A = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_A = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , input_encoded + special_token_id )
_A = tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
self.assertTrue(special_token not in decoded )
def UpperCAmelCase ( self ) -> int:
_A = self.get_tokenizers(do_lower_case=lowerCAmelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_A = chr(0xe_005 )
_A = chr(0xe_006 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=lowerCAmelCase_ )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"""additional_special_tokens""": [SPECIAL_TOKEN_2]} )
_A = tokenizer.tokenize(lowerCAmelCase_ )
_A = tokenizer.tokenize(lowerCAmelCase_ )
self.assertEqual(len(lowerCAmelCase_ ) , 1 )
self.assertEqual(len(lowerCAmelCase_ ) , 1 )
self.assertEqual(token_a[0] , lowerCAmelCase_ )
self.assertEqual(token_a[0] , lowerCAmelCase_ )
@require_tokenizers
def UpperCAmelCase ( self ) -> Any:
_A = self.get_tokenizers(do_lower_case=lowerCAmelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# a special token for Canine can be defined as follows:
_A = 0xe_006
_A = chr(lowerCAmelCase_ )
_A = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ )
tokenizer.add_special_tokens({"""additional_special_tokens""": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(lowerCAmelCase_ )
tokenizer.from_pretrained(lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> List[str]:
_A = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCAmelCase_ )
with open(os.path.join(lowerCAmelCase_ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
_A = json.load(lowerCAmelCase_ )
with open(os.path.join(lowerCAmelCase_ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
_A = json.load(lowerCAmelCase_ )
# a special token for Canine can be defined as follows:
_A = 0xe_006
_A = chr(lowerCAmelCase_ )
_A = [new_token_a]
_A = [new_token_a]
with open(os.path.join(lowerCAmelCase_ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(lowerCAmelCase_ , lowerCAmelCase_ )
with open(os.path.join(lowerCAmelCase_ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(lowerCAmelCase_ , lowerCAmelCase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_A = tokenizer_class.from_pretrained(lowerCAmelCase_ , extra_ids=0 )
self.assertIn(lowerCAmelCase_ , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
_A = 0xe_007
_A = chr(lowerCAmelCase_ )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_A = [AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ )]
_A = tokenizer_class.from_pretrained(
lowerCAmelCase_ , additional_special_tokens=lowerCAmelCase_ , extra_ids=0 )
self.assertIn(lowerCAmelCase_ , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def UpperCAmelCase ( self ) -> Optional[Any]:
_A = self.get_tokenizers(do_lower_case=lowerCAmelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_A = """hello world"""
if self.space_between_special_tokens:
_A = """[CLS] hello world [SEP]"""
else:
_A = input
_A = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_A = tokenizer.decode(lowerCAmelCase_ , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(lowerCAmelCase_ , [output, output.lower()] )
def UpperCAmelCase ( self ) -> Union[str, Any]:
_A = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_A = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
_A = """a"""
_A = ord(lowerCAmelCase_ )
for attr in attributes_list:
setattr(lowerCAmelCase_ , attr + """_id""" , lowerCAmelCase_ )
self.assertEqual(getattr(lowerCAmelCase_ , lowerCAmelCase_ ) , lowerCAmelCase_ )
self.assertEqual(getattr(lowerCAmelCase_ , attr + """_id""" ) , lowerCAmelCase_ )
setattr(lowerCAmelCase_ , attr + """_id""" , lowerCAmelCase_ )
self.assertEqual(getattr(lowerCAmelCase_ , lowerCAmelCase_ ) , lowerCAmelCase_ )
self.assertEqual(getattr(lowerCAmelCase_ , attr + """_id""" ) , lowerCAmelCase_ )
setattr(lowerCAmelCase_ , """additional_special_tokens_ids""" , [] )
self.assertListEqual(getattr(lowerCAmelCase_ , """additional_special_tokens""" ) , [] )
self.assertListEqual(getattr(lowerCAmelCase_ , """additional_special_tokens_ids""" ) , [] )
_A = 0xe_006
_A = chr(lowerCAmelCase_ )
setattr(lowerCAmelCase_ , """additional_special_tokens_ids""" , [additional_special_token_id] )
self.assertListEqual(getattr(lowerCAmelCase_ , """additional_special_tokens""" ) , [additional_special_token] )
self.assertListEqual(getattr(lowerCAmelCase_ , """additional_special_tokens_ids""" ) , [additional_special_token_id] )
def UpperCAmelCase ( self ) -> List[str]:
pass
def UpperCAmelCase ( self ) -> Tuple:
pass
def UpperCAmelCase ( self ) -> Union[str, Any]:
pass
def UpperCAmelCase ( self ) -> Optional[Any]:
pass
def UpperCAmelCase ( self ) -> Any:
pass
def UpperCAmelCase ( self ) -> Optional[Any]:
pass
def UpperCAmelCase ( self ) -> List[Any]:
pass
def UpperCAmelCase ( self ) -> Optional[Any]:
pass
| 715 | from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area( fnc :Callable[[int | float], int | float] , x_start :int | float , x_end :int | float , steps :int = 100 , ) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area
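# Sanity-check sketch: for f(x) = x^3 + x^2 the exact area between the curve and
# the x axis on [-5, 5] is 938/3 ~= 312.67, so the estimate should approach that
# value as `steps` grows:
# trapezoidal_area(lambda x: x**3 + x**2, -5, 5, 100_000)  # ~312.66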
if __name__ == "__main__":
    def f( x :float) -> float:
        return x**3 + x**2
print('f(x) = x^3 + x^2')
print('The area between the curve, x = -5, x = 5 and the x axis is:')
_SCREAMING_SNAKE_CASE = 10
while i <= 100_000:
print(F'''with {i} steps: {trapezoidal_area(f, -5, 5, i)}''')
i *= 10
| 83 | 0 |
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class a ( ABC ):
    """simple docstring"""
    def __init__( self , path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None , split: Optional[NamedSplit] = None , features: Optional[Features] = None , cache_dir: str = None , keep_in_memory: bool = False , streaming: bool = False , num_proc: Optional[int] = None , **kwargs , ) -> None:
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths , dict ) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs
@abstractmethod
def UpperCAmelCase ( self ) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
pass
class a ( ABC ):
    """simple docstring"""
    def __init__( self , features: Optional[Features] = None , cache_dir: str = None , keep_in_memory: bool = False , streaming: bool = False , num_proc: Optional[int] = None , **kwargs , ) -> None:
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs
@abstractmethod
def UpperCAmelCase ( self ) -> Union[Dataset, IterableDataset]:
pass
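# Pattern sketch: concrete readers in `datasets` subclass these ABCs and implement
# read(); the user-facing equivalent of such a reader is, e.g.:
# from datasets import load_dataset
# ds = load_dataset("csv", data_files={"train": "train.csv"})  # hypothetical file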
| 716 | import numpy as np
import qiskit
def bbaa( key_len :int = 8 , seed :int | None = None) -> str:
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2 , size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2 , size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2 , size=num_qubits)
    # Quantum Circuit to simulate BB84
    bbaa_circ = qiskit.QuantumCircuit(num_qubits , name="BB84")
    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_state):
        if alice_state[index] == 1:
            bbaa_circ.x(index)
        if alice_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()
    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()
    bbaa_circ.measure_all()
    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bbaa_circ , sim , shots=1 , seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bbaa_circ).most_frequent()
    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                alice_basis , bob_basis , result)
            if alice_basis_bit == bob_basis_bit
        ])
    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len , "0")
    return key
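# Expectation sketch: Alice's and Bob's bases agree with probability 1/2, so of
# the 6 * key_len qubits about 3 * key_len sifted bits survive -- comfortably
# more than the key_len bits kept. E.g. (requires qiskit):
# len(bbaa(key_len=8, seed=0)) == 8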
if __name__ == "__main__":
print(F'''The generated key is : {bbaa(8, seed=0)}''')
from doctest import testmod
testmod()
| 83 | 0 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list( shape , scale=1.0 , rng=None , name=None) -> Any:
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
@require_torch
class SpeechTaFeatureExtractionTester ( unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=7 , min_seq_length=4_00 , max_seq_length=20_00 , feature_size=1 , padding_value=0.0 , sampling_rate=1_60_00 , do_normalize=True , num_mel_bins=80 , hop_length=16 , win_length=64 , win_function="hann_window" , fmin=80 , fmax=76_00 , mel_floor=1E-10 , return_attention_mask=True , ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask
    def prepare_feat_extract_dict( self ) -> Tuple:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ) -> Optional[Any]:
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )
        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
    def prepare_inputs_for_target( self , equal_length=False , numpify=False ) -> Union[str, Any]:
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
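    # Usage sketch for the extractor under test (upstream name: SpeechT5FeatureExtractor;
    # assumes 16 kHz mono float32 audio):
    # fe = SpeechTaFeatureExtractor()
    # inputs = fe(np.zeros(16_000, dtype=np.float32), sampling_rate=16_000, return_tensors="np")
    # inputs.input_values.shape  # -> (1, 16000)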
@require_torch
class a ( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
"""simple docstring"""
    feature_extraction_class = SpeechTaFeatureExtractor
    def setUp( self ) -> None:
        self.feat_extract_tester = SpeechTaFeatureExtractionTester(self )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> int:
self.assertTrue(np.all(np.mean(lowerCAmelCase_ , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowerCAmelCase_ , axis=0 ) - 1 ) < 1E-3 ) )
def UpperCAmelCase ( self ) -> Any:
# Tests that all call wrap to encode_plus and batch_encode_plus
_A = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_A = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
_A = [np.asarray(lowerCAmelCase_ ) for speech_input in speech_inputs]
# Test not batched input
_A = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values
_A = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values
self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-3 ) )
# Test batched
_A = feat_extract(lowerCAmelCase_ , return_tensors="""np""" ).input_values
_A = feat_extract(lowerCAmelCase_ , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-3 ) )
def UpperCAmelCase ( self ) -> Any:
_A = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_A = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
_A = ["""longest""", """max_length""", """do_not_pad"""]
_A = [None, 16_00, None]
for max_length, padding in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
_A = feat_extract(lowerCAmelCase_ , padding=lowerCAmelCase_ , max_length=lowerCAmelCase_ , return_tensors="""np""" )
_A = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00] )
self.assertTrue(input_values[0][8_00:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:10_00] )
self.assertTrue(input_values[0][10_00:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:12_00] )
def UpperCAmelCase ( self ) -> Optional[int]:
_A = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_A = range(8_00 , 14_00 , 2_00 )
_A = [floats_list((1, x) )[0] for x in lengths]
_A = ["""longest""", """max_length""", """do_not_pad"""]
_A = [None, 16_00, None]
for max_length, padding in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
_A = feat_extract(lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding=lowerCAmelCase_ )
_A = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00] )
self._check_zero_mean_unit_variance(input_values[1][:10_00] )
self._check_zero_mean_unit_variance(input_values[2][:12_00] )
def UpperCAmelCase ( self ) -> Tuple:
_A = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_A = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
_A = feat_extract(
lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=10_00 , padding="""max_length""" , return_tensors="""np""" )
_A = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def UpperCAmelCase ( self ) -> List[Any]:
_A = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_A = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
_A = feat_extract(
lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=10_00 , padding="""longest""" , return_tensors="""np""" )
_A = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1, :10_00] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 10_00) )
_A = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
_A = feat_extract(
lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=20_00 , padding="""longest""" , return_tensors="""np""" )
_A = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1, :10_00] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 12_00) )
    def UpperCAmelCase ( self ) -> int:
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(1_00 ).astype(np.float32 )
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
            self.assertTrue(np_processed.input_values.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
            self.assertTrue(pt_processed.input_values.dtype == torch.float32 )
def UpperCAmelCase ( self ) -> str:
# Tests that all call wrap to encode_plus and batch_encode_plus
_A = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_A = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
_A = [np.asarray(lowerCAmelCase_ ) for speech_input in speech_inputs]
# Test feature size
_A = feature_extractor(audio_target=lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors="""np""" ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
_A = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_values
_A = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_values
self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-3 ) )
# Test batched
_A = feature_extractor(lowerCAmelCase_ , return_tensors="""np""" ).input_values
_A = feature_extractor(lowerCAmelCase_ , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
_A = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
_A = np.asarray(lowerCAmelCase_ )
_A = feature_extractor(lowerCAmelCase_ , return_tensors="""np""" ).input_values
_A = feature_extractor(lowerCAmelCase_ , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-3 ) )
def UpperCAmelCase ( self ) -> int:
_A = self.feat_extract_tester.prepare_inputs_for_target()
_A = self.feature_extraction_class(**self.feat_extract_dict )
_A = feat_extract.model_input_names[0]
_A = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(lowerCAmelCase_ ) == len(lowerCAmelCase_ ) for x, y in zip(lowerCAmelCase_ , processed_features[input_name] ) ) )
_A = self.feat_extract_tester.prepare_inputs_for_target(equal_length=lowerCAmelCase_ )
_A = BatchFeature({input_name: speech_inputs} , tensor_type="""np""" )
_A = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_A = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def UpperCAmelCase ( self ) -> Optional[Any]:
_A = self.feat_extract_tester.prepare_inputs_for_target(equal_length=lowerCAmelCase_ )
_A = self.feature_extraction_class(**self.feat_extract_dict )
_A = feat_extract.model_input_names[0]
_A = BatchFeature({input_name: speech_inputs} , tensor_type="""pt""" )
_A = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_A = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
    def UpperCAmelCase ( self ) -> List[str]:
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} )
        feat_extract.feature_size = feat_extract.num_mel_bins # hack!
        input_np = feat_extract.pad(processed_features , padding="longest" , return_tensors="np" )[input_name]
        input_pt = feat_extract.pad(processed_features , padding="longest" , return_tensors="pt" )[input_name]
        self.assertTrue(abs(input_np.astype(np.float32 ).sum() - input_pt.numpy().astype(np.float32 ).sum() ) < 1E-2 )
def UpperCAmelCase ( self ) -> Union[str, Any]:
_A = self.feat_extract_dict
_A = True
_A = self.feature_extraction_class(**lowerCAmelCase_ )
_A = self.feat_extract_tester.prepare_inputs_for_target()
_A = [len(lowerCAmelCase_ ) for x in speech_inputs]
_A = feat_extract.model_input_names[0]
_A = BatchFeature({input_name: speech_inputs} )
_A = feat_extract.num_mel_bins # hack!
_A = feat_extract.pad(lowerCAmelCase_ , padding="""longest""" , return_tensors="""np""" )
self.assertIn("""attention_mask""" , lowerCAmelCase_ )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> int:
_A = self.feat_extract_dict
_A = True
_A = self.feature_extraction_class(**lowerCAmelCase_ )
_A = self.feat_extract_tester.prepare_inputs_for_target()
_A = [len(lowerCAmelCase_ ) for x in speech_inputs]
_A = feat_extract.model_input_names[0]
_A = BatchFeature({input_name: speech_inputs} )
_A = min(lowerCAmelCase_ )
_A = feat_extract.num_mel_bins # hack!
_A = feat_extract.pad(
lowerCAmelCase_ , padding="""max_length""" , max_length=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors="""np""" )
self.assertIn("""attention_mask""" , lowerCAmelCase_ )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
    def _load_datasamples( self , num_samples ) -> List[Any]:
        from datasets import load_dataset
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
        # automatic decoding with librispeech
        speech_samples = ds.sort("id" ).select(range(num_samples ) )[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def UpperCAmelCase ( self ) -> Any:
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3804E-03, 2.0752E-03, 1.9836E-03, 2.1057E-03, 1.6174E-03,
             3.0518E-04, 9.1553E-05, 3.3569E-04, 9.7656E-04, 1.8311E-03,
             2.0142E-03, 2.1057E-03, 1.7395E-03, 4.5776E-04, -3.9673E-04,
             4.5776E-04, 1.0071E-03, 9.1553E-05, 4.8828E-04, 1.1597E-03,
             7.3242E-04, 9.4604E-04, 1.8005E-03, 1.8311E-03, 8.8501E-04,
             4.2725E-04, 4.8828E-04, 7.3242E-04, 1.0986E-03, 2.1057E-03] )
        # fmt: on
        input_speech = self._load_datasamples(1 )
        feature_extractor = SpeechTaFeatureExtractor()
        input_values = feature_extractor(input_speech , return_tensors="pt" ).input_values
        self.assertEqual(input_values.shape , (1, 9_36_80) )
        self.assertTrue(torch.allclose(input_values[0, :30] , EXPECTED_INPUT_VALUES , atol=1E-6 ) )
    def UpperCAmelCase ( self ) -> List[Any]:
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
             -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
             -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
             -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998] )
        # fmt: on
        input_speech = self._load_datasamples(1 )
        feature_extractor = SpeechTaFeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech , return_tensors="pt" ).input_values
        self.assertEqual(input_values.shape , (1, 3_66, 80) )
        self.assertTrue(torch.allclose(input_values[0, 0, :30] , EXPECTED_INPUT_VALUES , atol=1E-4 ) )
| 717 | import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory( _ :int) -> Optional[int]:
    return EnvironmentCommand()
def download_command_factory( args :Tuple) -> List[str]:
    return EnvironmentCommand(args.accelerate_config_file)
class EnvironmentCommand ( BaseTransformersCLICommand ):
"""simple docstring"""
@staticmethod
    def register_subcommand( parser: ArgumentParser ) -> None:
        download_parser = parser.add_parser("env" )
        download_parser.set_defaults(func=info_command_factory )
        download_parser.add_argument(
            "--accelerate-config_file" , default=None , help="The accelerate config file to use for the default values in the launching script." , )
        download_parser.set_defaults(func=download_command_factory )
    def __init__( self , accelerate_config_file , *args ) -> None:
        self._accelerate_config_file = accelerate_config_file
    def run( self ) -> Dict:
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors
            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors" ) is not None:
            import safetensors
            safetensors_version = F'''{safetensors.__version__} but is ignored because of PyTorch version too old.'''
        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file
            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file ):
                accelerate_config = load_config_from_file(self._accelerate_config_file ).to_dict()
            accelerate_config_str = (
                "\n".join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
                if isinstance(accelerate_config , dict )
                else F'''\t{accelerate_config}'''
            )
        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch
            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf
            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU" ) )
        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib
            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform
        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": F'''{safetensors_version}''',
            "Accelerate version": F'''{accelerate_version}''',
            "Accelerate config": F'''{accelerate_config_str}''',
            "PyTorch version (GPU?)": F'''{pt_version} ({pt_cuda_available})''',
            "Tensorflow version (GPU?)": F'''{tf_version} ({tf_cuda_available})''',
            "Flax version (CPU?/GPU?/TPU?)": F'''{flax_version} ({jax_backend})''',
            "Jax version": F'''{jax_version}''',
            "JaxLib version": F'''{jaxlib_version}''',
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }
        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" )
        print(self.format_dict(info ) )
        return info
@staticmethod
    def format_dict( d ) -> str:
        return "\n".join([F'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
| 83 | 0 |
def solution( limit :int = 1_000_000) -> int:
    primes = set(range(3 , limit , 2))
    primes.add(2)
    for p in range(3 , limit , 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , limit , p)))
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p , limit + 1 , p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))
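# Known check (Project Euler 72): for d <= 8 there are 21 reduced proper
# fractions, i.e. sum(phi(2..8)) == 21, so solution(8) should return 21.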
if __name__ == "__main__":
print(F'''{solution() = }''')
| 718 | import colorsys
from PIL import Image # type: ignore
def get_distance( x :float , y :float , max_step :int) -> float:
    a = x
    b = y
    for step in range(max_step): # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
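# Cross-check sketch: the (a, b) update above is z -> z*z + c written out for
# z = a + b*i; Python's complex type reproduces the same orbit:
# c = complex(-0.6, 0.4); z = c
# for _ in range(10): z = z * z + c   # diverged once abs(z) > 2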
def get_black_and_white_rgb( distance :float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb( distance :float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance , 1 , 1))
def get_image( image_width :int = 800 , image_height :int = 600 , figure_center_x :float = -0.6 , figure_center_y :float = 0 , figure_width :float = 3.2 , max_step :int = 50 , use_distance_color_coding :bool = True , ) -> Image.Image:
    img = Image.new("RGB" , (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x , figure_y , max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
_SCREAMING_SNAKE_CASE = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 83 | 0 |
# using dfs for finding eulerian path traversal
def dfs( u , graph , visited_edge , path=None) -> list:
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v , graph , visited_edge , path)
    return path
def check_circuit_or_path( graph , max_node) -> tuple:
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node
def check_euler( graph , max_node) -> None:
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph , max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node , graph , visited_edge)
    print(path)
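# Quick example: the path graph 1-2-3 has exactly two odd-degree nodes (1 and 3),
# so it admits an Euler path but no Euler circuit:
# check_euler({1: [2], 2: [1, 3], 3: [2]}, 4)   # prints the path found by dfs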
def main() -> None:
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1 , max_node)
    check_euler(g2 , max_node)
    check_euler(g3 , max_node)
    check_euler(g4 , max_node)
    check_euler(g5 , max_node)
if __name__ == "__main__":
main()
| 719 | import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
| 83 | 0 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
_SCREAMING_SNAKE_CASE = datasets.logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n'
_SCREAMING_SNAKE_CASE = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements the common evaluation metrics, including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only works with the CoNLL line format:\nThe CoNLL format has one word per line with all the annotations for this word in columns separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contains the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identify the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore information on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite were added by @andreasvc.\nParsing of CoNLL files was developed by Leo Born.\n'
_SCREAMING_SNAKE_CASE = '\nCalculates coreference evaluation metrics.\nArgs:\n    predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n        Each prediction is a word with its annotations as a string made of columns joined with spaces.\n        Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation)\n        See the details on the format in the description of the metric.\n    references: list of sentences. Each sentence is a list of word references to score in the CoNLL format.\n        Each reference is a word with its annotations as a string made of columns joined with spaces.\n        Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation)\n        See the details on the format in the description of the metric.\n    keep_singletons: After extracting all mentions of key or system files,\n        mentions whose corresponding coreference chain is of size one,\n        are considered as singletons. The default evaluation mode will include\n        singletons in evaluations if they are included in the key or the system files.\n        By setting \'keep_singletons=False\', all singletons in the key and system files\n        will be excluded from the evaluation.\n    NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n        leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n    min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n        Minimum spans are determined using the MINA algorithm.\n\nReturns:\n    \'mentions\': mentions\n    \'muc\': MUC metric [Vilain et al, 1995]\n    \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n    \'ceafe\': CEAFe [Luo et al., 2005]\n    \'lea\': LEA [Moosavi and Strube, 2016]\n    \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n    >>> coval = datasets.load_metric(\'coval\')\n    >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n    ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n    ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n    ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n    ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n    ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n    >>> references = [words]\n    >>> predictions = [words]\n    >>> results = coval.compute(predictions=predictions, references=references)\n    >>> print(results) # doctest:+ELLIPSIS\n    {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n'
def snake_case ( snake_case__ :Optional[Any] , snake_case__ :str , snake_case__ :List[str]=False , snake_case__ :Dict=False , snake_case__ :Any=True , snake_case__ :List[str]=False , snake_case__ :Optional[Any]="dummy_doc") -> List[Any]:
_A = {doc: key_lines}
_A = {doc: sys_lines}
_A = {}
_A = 0
_A = 0
_A = 0
_A = 0
_A = 0
_A = 0
_A , _A = reader.get_doc_mentions(snake_case__ , key_doc_lines[doc] , snake_case__)
key_singletons_num += singletons_num
if NP_only or min_span:
_A = reader.set_annotated_parse_trees(snake_case__ , key_doc_lines[doc] , snake_case__ , snake_case__)
_A , _A = reader.get_doc_mentions(snake_case__ , sys_doc_lines[doc] , snake_case__)
sys_singletons_num += singletons_num
if NP_only or min_span:
_A = reader.set_annotated_parse_trees(snake_case__ , key_doc_lines[doc] , snake_case__ , snake_case__)
if remove_nested:
_A , _A = reader.remove_nested_coref_mentions(snake_case__ , snake_case__)
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
_A , _A = reader.remove_nested_coref_mentions(snake_case__ , snake_case__)
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
_A = reader.get_mention_assignments(snake_case__ , snake_case__)
_A = reader.get_mention_assignments(snake_case__ , snake_case__)
_A = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"""Number of removed nested coreferring mentions in the key """
F'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''')
logger.info(
"""Number of resulting singleton clusters in the key """
F'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''')
if not keep_singletons:
logger.info(
F'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '''
"""files, respectively""")
return doc_coref_infos
def snake_case ( snake_case__ :Tuple , snake_case__ :Tuple , snake_case__ :Dict , snake_case__ :Dict , snake_case__ :Tuple , snake_case__ :Dict , snake_case__ :Tuple) -> int:
_A = get_coref_infos(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__)
_A = {}
_A = 0
_A = 0
for name, metric in metrics:
_A , _A , _A = evaluator.evaluate_documents(snake_case__ , snake_case__ , beta=1)
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F'''{name}/recall''': recall, F'''{name}/precision''': precision, F'''{name}/f1''': fa})
        # logging does not accept print-style fragments; extra positional args are
        # treated as %-format arguments, so build a single message instead.
        logger.info(
            F'''{name.ljust(10)} Recall: {recall * 100:.2f}  Precision: {precision * 100:.2f}  F1: {fa * 100:.2f}''' )
if conll_subparts_num == 3:
_A = (conll / 3) * 100
logger.info(F'''CoNLL score: {conll:.2f}''')
output_scores.update({"""conll_score""": conll})
return output_scores
def snake_case ( snake_case__ :Union[str, Any]) -> List[Any]:
_A = False
for line in key_lines:
if not line.startswith("""#"""):
if len(line.split()) > 6:
_A = line.split()[5]
                if parse_col != "-":
_A = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Sequence(datasets.Value("""string""" ) ),
} ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[
"""https://github.com/ns-moosavi/coval""",
"""https://www.aclweb.org/anthology/P16-1060""",
"""http://www.conll.cemantix.org/2012/data.html""",
] , )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False ) -> Union[str, Any]:
_A = [
("""mentions""", evaluator.mentions),
("""muc""", evaluator.muc),
("""bcub""", evaluator.b_cubed),
("""ceafe""", evaluator.ceafe),
("""lea""", evaluator.lea),
]
if min_span:
_A = util.check_gold_parse_annotation(lowerCAmelCase_ )
if not has_gold_parse:
raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
_A = evaluate(
key_lines=lowerCAmelCase_ , sys_lines=lowerCAmelCase_ , metrics=lowerCAmelCase_ , NP_only=lowerCAmelCase_ , remove_nested=lowerCAmelCase_ , keep_singletons=lowerCAmelCase_ , min_span=lowerCAmelCase_ , )
return score
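# Illustrative sketch (not part of the metric API; the helper name below is
# hypothetical): the 'conll_score' computed above is simply the mean of the
# MUC, B-cubed and CEAFe F1 values rescaled to a 0-100 range.
def conll_score_from_f1(muc_f1, bcub_f1, ceafe_f1):
    return (muc_f1 + bcub_f1 + ceafe_f1) / 3 * 100


assert conll_score_from_f1(1.0, 1.0, 1.0) == 100.0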
| 720 | import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
_SCREAMING_SNAKE_CASE = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
_SCREAMING_SNAKE_CASE = {'facebook/blenderbot_small-90M': 512}
def snake_case ( snake_case__ :Tuple) -> str:
_A = set()
_A = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
_A = char
_A = set(snake_case__)
return pairs
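# Worked example for the pair-extraction helper above: for the symbol tuple
# ('l', 'o', 'w', 'e', 'r</w>') it returns every distinct adjacent pair, which
# is exactly what the BPE merge loop below ranks and fuses. The variable names
# here are illustrative only.
_example_word = ("l", "o", "w", "e", "r</w>")
_example_pairs = {(_example_word[i], _example_word[i + 1]) for i in range(len(_example_word) - 1)}
assert _example_pairs == {("l", "o"), ("o", "w"), ("w", "e"), ("e", "r</w>")}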
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :List[Any] = VOCAB_FILES_NAMES
lowerCamelCase :Tuple = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase :List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase :int = ['''input_ids''', '''attention_mask''']
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="__start__" , lowerCAmelCase_="__end__" , lowerCAmelCase_="__unk__" , lowerCAmelCase_="__null__" , **lowerCAmelCase_ , ) -> int:
super().__init__(unk_token=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , **lowerCAmelCase_ )
with open(lowerCAmelCase_ , encoding="""utf-8""" ) as vocab_handle:
_A = json.load(lowerCAmelCase_ )
_A = {v: k for k, v in self.encoder.items()}
with open(lowerCAmelCase_ , encoding="""utf-8""" ) as merges_handle:
_A = merges_handle.read().split("""\n""" )[1:-1]
_A = [tuple(merge.split() ) for merge in merges]
_A = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
_A = {}
@property
def UpperCAmelCase ( self ) -> int:
return len(self.encoder )
def UpperCAmelCase ( self ) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> str:
if token in self.cache:
return self.cache[token]
_A = re.sub("""([.,!?()])""" , r""" \1""" , lowerCAmelCase_ )
_A = re.sub("""(')""" , r""" \1 """ , lowerCAmelCase_ )
_A = re.sub(r"""\s{2,}""" , """ """ , lowerCAmelCase_ )
if "\n" in token:
_A = token.replace("""\n""" , """ __newln__""" )
_A = token.split(""" """ )
_A = []
for token in tokens:
if not len(lowerCAmelCase_ ):
continue
_A = token.lower()
_A = tuple(lowerCAmelCase_ )
_A = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
_A = get_pairs(lowerCAmelCase_ )
if not pairs:
words.append(lowerCAmelCase_ )
continue
while True:
_A = min(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : self.bpe_ranks.get(lowerCAmelCase_ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
_A , _A = bigram
_A = []
_A = 0
while i < len(lowerCAmelCase_ ):
try:
_A = word.index(lowerCAmelCase_ , lowerCAmelCase_ )
new_word.extend(word[i:j] )
_A = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(lowerCAmelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_A = tuple(lowerCAmelCase_ )
_A = new_word
if len(lowerCAmelCase_ ) == 1:
break
else:
_A = get_pairs(lowerCAmelCase_ )
_A = """@@ """.join(lowerCAmelCase_ )
_A = word[:-4]
_A = word
words.append(lowerCAmelCase_ )
return " ".join(lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> List[str]:
_A = []
_A = re.findall(r"""\S+\n?""" , lowerCAmelCase_ )
for token in words:
split_tokens.extend(list(self.bpe(lowerCAmelCase_ ).split(""" """ ) ) )
return split_tokens
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> int:
_A = token.lower()
return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token ) )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> str:
return self.decoder.get(lowerCAmelCase_ , self.unk_token )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> str:
_A = """ """.join(lowerCAmelCase_ ).replace("""@@ """ , """""" ).strip()
return out_string
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_A = os.path.join(
lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_A = os.path.join(
lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase_ , ensure_ascii=lowerCAmelCase_ ) + """\n""" )
_A = 0
with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
""" Please check that the tokenizer is not corrupted!""" )
_A = token_index
writer.write(""" """.join(lowerCAmelCase_ ) + """\n""" )
index += 1
return vocab_file, merge_file
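# Self-contained sketch of the merge loop used by the tokenizer above (the toy
# rank table passed in is an assumption for the demo): repeatedly fuse the
# lowest-ranked adjacent pair until no ranked pair remains.
def _toy_bpe(word, bpe_ranks):
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    while len(symbols) > 1:
        pairs = {(symbols[i], symbols[i + 1]) for i in range(len(symbols) - 1)}
        best = min(pairs, key=lambda pair: bpe_ranks.get(pair, float("inf")))
        if best not in bpe_ranks:
            break
        merged, i = [], 0
        while i < len(symbols):
            if i < len(symbols) - 1 and (symbols[i], symbols[i + 1]) == best:
                merged.append(symbols[i] + symbols[i + 1])
                i += 2
            else:
                merged.append(symbols[i])
                i += 1
        symbols = merged
    return symbols


assert _toy_bpe("lower", {("l", "o"): 0, ("lo", "w"): 1}) == ["low", "e", "r</w>"]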
| 83 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE = {'configuration_plbart': ['PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PLBartConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['PLBartTokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'PLBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'PLBartForCausalLM',
'PLBartForConditionalGeneration',
'PLBartForSequenceClassification',
'PLBartModel',
'PLBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure)
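# Minimal sketch of the lazy-import pattern used above; this is *not* the real
# transformers `_LazyModule`, only an illustration of the idea: names listed in
# the import structure are resolved and cached on first attribute access
# instead of at package import time.
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map every exported attribute back to the submodule that defines it.
        self._attr_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_submodule:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f"{self.__name__}.{self._attr_to_submodule[attr]}")
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups bypass __getattr__
        return value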
| 721 | # fmt: off
_SCREAMING_SNAKE_CASE = {
'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.', 'G': '--.',
'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..', 'M': '--', 'N': '-.',
'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.', 'S': '...', 'T': '-', 'U': '..-',
'V': '...-', 'W': '.--', 'X': '-..-', 'Y': '-.--', 'Z': '--..', '1': '.----',
'2': '..---', '3': '...--', '4': '....-', '5': '.....', '6': '-....', '7': '--...',
'8': '---..', '9': '----.', '0': '-----', '&': '.-...', '@': '.--.-.',
':': '---...', ',': '--..--', '.': '.-.-.-', '\'': '.----.', '"': '.-..-.',
'?': '..--..', '/': '-..-.', '=': '-...-', '+': '.-.-.', '-': '-....-',
'(': '-.--.', ')': '-.--.-', '!': '-.-.--', ' ': '/'
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
_SCREAMING_SNAKE_CASE = {value: key for key, value in MORSE_CODE_DICT.items()}
def snake_case ( snake_case__ :str) -> str:
return " ".join(MORSE_CODE_DICT[char] for char in message.upper())
def snake_case ( snake_case__ :str) -> str:
return "".join(REVERSE_DICT[char] for char in message.split())
def snake_case ( ) -> None:
_A = """Morse code here!"""
print(snake_case__)
_A = encrypt(snake_case__)
print(snake_case__)
_A = decrypt(snake_case__)
print(snake_case__)
if __name__ == "__main__":
main()
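# Round-trip sanity check, following the encrypt/decrypt names used in main()
# above: encoding then decoding an uppercase message of letters, digits and
# spaces reproduces it exactly.
assert decrypt(encrypt("SOS 2023")) == "SOS 2023"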
| 83 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class a ( metaclass=__lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :Union[str, Any] = ['''transformers''', '''torch''', '''note_seq''']
def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> str:
requires_backends(self , ["""transformers""", """torch""", """note_seq"""] )
@classmethod
def UpperCAmelCase ( cls , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Optional[Any]:
requires_backends(cls , ["""transformers""", """torch""", """note_seq"""] )
@classmethod
def UpperCAmelCase ( cls , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> int:
requires_backends(cls , ["""transformers""", """torch""", """note_seq"""] )
| 700 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE = {
'configuration_jukebox': [
'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP',
'JukeboxConfig',
'JukeboxPriorConfig',
'JukeboxVQVAEConfig',
],
'tokenization_jukebox': ['JukeboxTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'JukeboxModel',
'JukeboxPreTrainedModel',
'JukeboxVQVAE',
'JukeboxPrior',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 83 | 0 |
import heapq
def snake_case ( snake_case__ :dict) -> set[int]:
_A = []
# for each node and his adjacency list add them and the rank of the node to queue
# using heapq module the queue will be filled like a Priority Queue
# heapq works with a min priority queue, so I used -1*len(v) to build it
for key, value in graph.items():
# O(log(n))
heapq.heappush(snake_case__ , [-1 * len(snake_case__), (key, value)])
# chosen_vertices = set of chosen vertices
_A = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
_A = heapq.heappop(snake_case__)[1][0]
chosen_vertices.add(snake_case__)
# Remove all arcs adjacent to argmax
for elem in queue:
# if v haven't adjacent node, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacent list and update his rank
if argmax in elem[1][1]:
_A = elem[1][1].index(snake_case__)
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(snake_case__)
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
_SCREAMING_SNAKE_CASE = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
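# The queue above emulates a max-priority queue on top of Python's min-heap by
# pushing the *negated* degree. A tiny standalone illustration of the trick:
import heapq

_degrees = {0: 2, 1: 2, 2: 3, 3: 4, 4: 2}
_heap = [[-degree, node] for node, degree in _degrees.items()]
heapq.heapify(_heap)
assert heapq.heappop(_heap) == [-4, 3]  # node 3 has the highest degree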
| 701 | # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :Tuple = '''philschmid/bart-large-cnn-samsum'''
lowerCamelCase :Tuple = (
'''This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '''
'''and returns a summary of the text.'''
)
lowerCamelCase :List[Any] = '''summarizer'''
lowerCamelCase :List[str] = AutoTokenizer
lowerCamelCase :Dict = AutoModelForSeqaSeqLM
lowerCamelCase :int = ['''text''']
lowerCamelCase :List[Any] = ['''text''']
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> List[Any]:
return self.pre_processor(lowerCAmelCase_ , return_tensors="""pt""" , truncation=lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Tuple:
return self.model.generate(**lowerCAmelCase_ )[0]
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Union[str, Any]:
return self.pre_processor.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ )
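# Hedged usage sketch: PipelineTool subclasses are callable, chaining the
# encode -> forward -> decode steps defined above. Instantiating the tool
# downloads the checkpoint, so the example is left as comments:
#
#   tool = a()  # the summarization tool class defined above
#   print(tool("Several paragraphs of English text to condense..."))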
| 83 | 0 |
class a :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Dict:
_A = name
_A = value
_A = weight
def __repr__( self ) -> Tuple:
return F'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'''
def UpperCAmelCase ( self ) -> List[str]:
return self.value
def UpperCAmelCase ( self ) -> Tuple:
return self.name
def UpperCAmelCase ( self ) -> Tuple:
return self.weight
def UpperCAmelCase ( self ) -> Union[str, Any]:
return self.value / self.weight
def snake_case ( snake_case__ :Dict , snake_case__ :Union[str, Any] , snake_case__ :Tuple) -> List[str]:
_A = []
for i in range(len(snake_case__)):
menu.append(Things(name[i] , value[i] , weight[i]))
return menu
def snake_case ( snake_case__ :Union[str, Any] , snake_case__ :Tuple , snake_case__ :Any) -> List[str]:
_A = sorted(snake_case__ , key=snake_case__ , reverse=snake_case__)
_A = []
_A , _A = 0.0, 0.0
for i in range(len(snake_case__)):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i])
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def snake_case ( ) -> str:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
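# Self-contained sketch of the greedy rule implemented above: order items by
# descending value/weight ratio, then take each item that still fits the
# budget. The sample data is illustrative.
_items = [("burger", 80, 40), ("salad", 40, 20), ("cake", 30, 10)]  # (name, value, weight)
_items.sort(key=lambda item: item[1] / item[2], reverse=True)
_budget, _taken, _total_value, _total_weight = 50, [], 0.0, 0.0
for _name, _value, _weight in _items:
    if _total_weight + _weight <= _budget:
        _taken.append(_name)
        _total_weight += _weight
        _total_value += _value
assert _taken == ["cake", "burger"] and _total_value == 110.0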
| 702 | import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
_SCREAMING_SNAKE_CASE = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def snake_case ( snake_case__ :Union[str, Any]) -> Dict:
_A = torch.load(snake_case__ , map_location="""cpu""")
return sd
def snake_case ( snake_case__ :List[str] , snake_case__ :Optional[Any] , snake_case__ :int=rename_keys_prefix) -> Optional[Any]:
_A = OrderedDict()
_A = torch.arange(config.max_position_embeddings).expand((1, -1))
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
_A = key
for name_pair in rename_keys_prefix:
_A = new_key.replace(name_pair[0] , name_pair[1])
_A = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`, but was added separately
_A = new_d["""cls.predictions.bias"""]
return new_d
@torch.no_grad()
def snake_case ( snake_case__ :Tuple , snake_case__ :Tuple) -> int:
assert (
checkpoint_path.split("""/""")[-1] in ACCEPTABLE_CHECKPOINTS
), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
# Get Config
if "pre" in checkpoint_path:
_A = """pretraining"""
if "vcr" in checkpoint_path:
_A = {"""visual_embedding_dim""": 512}
elif "vqa_advanced" in checkpoint_path:
_A = {"""visual_embedding_dim""": 2_048}
elif "vqa" in checkpoint_path:
_A = {"""visual_embedding_dim""": 2_048}
elif "nlvr" in checkpoint_path:
_A = {"""visual_embedding_dim""": 1_024}
else:
raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''')
else:
if "vcr" in checkpoint_path:
_A = {"""visual_embedding_dim""": 512}
_A = """multichoice"""
elif "vqa_advanced" in checkpoint_path:
_A = {"""visual_embedding_dim""": 2_048}
_A = """vqa_advanced"""
elif "vqa" in checkpoint_path:
_A = {"""visual_embedding_dim""": 2_048, """num_labels""": 3_129}
_A = """vqa"""
elif "nlvr" in checkpoint_path:
_A = {
"""visual_embedding_dim""": 1_024,
"""num_labels""": 2,
}
_A = """nlvr"""
_A = VisualBertConfig(**snake_case__)
# Load State Dict
_A = load_state_dict(snake_case__)
_A = get_new_dict(snake_case__ , snake_case__)
if model_type == "pretraining":
_A = VisualBertForPreTraining(snake_case__)
elif model_type == "vqa":
_A = VisualBertForQuestionAnswering(snake_case__)
elif model_type == "nlvr":
_A = VisualBertForVisualReasoning(snake_case__)
elif model_type == "multichoice":
_A = VisualBertForMultipleChoice(snake_case__)
model.load_state_dict(snake_case__)
# Save Checkpoints
Path(snake_case__).mkdir(exist_ok=snake_case__)
model.save_pretrained(snake_case__)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
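# Example invocation (script name and paths are illustrative; the checkpoint
# filename must be one of ACCEPTABLE_CHECKPOINTS, and substrings such as
# "pre", "vqa" and "nlvr" select the configuration and model head):
#
#   python convert_visual_bert_checkpoint.py vqa_fine_tuned.th ./visualbert-vqa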
| 83 | 0 |
def snake_case ( snake_case__ :Optional[Any]) -> List[Any]:
_A = 0
_A = len(snake_case__)
for i in range(n - 1):
for j in range(i + 1 , snake_case__):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def snake_case ( snake_case__ :int) -> Optional[Any]:
if len(snake_case__) <= 1:
return arr, 0
_A = len(snake_case__) // 2
_A = arr[0:mid]
_A = arr[mid:]
_A , _A = count_inversions_recursive(snake_case__)
_A , _A = count_inversions_recursive(snake_case__)
_A , _A = _count_cross_inversions(snake_case__ , snake_case__)
_A = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def snake_case ( snake_case__ :Tuple , snake_case__ :Dict) -> Dict:
_A = []
_A = _A = _A = 0
while i < len(snake_case__) and j < len(snake_case__):
if p[i] > q[j]:
# if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P)
# These are all inversions. The claim emerges from the
# property that P is sorted.
num_inversion += len(snake_case__) - i
r.append(q[j])
j += 1
else:
r.append(p[i])
i += 1
if i < len(snake_case__):
r.extend(p[i:])
else:
r.extend(q[j:])
return r, num_inversion
def snake_case ( ) -> Optional[Any]:
_A = [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
_A = count_inversions_bf(snake_case__)
_A , _A = count_inversions_recursive(snake_case__)
assert num_inversions_bf == num_inversions_recursive == 8
print("""number of inversions = """ , snake_case__)
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
_A = count_inversions_bf(snake_case__)
_A , _A = count_inversions_recursive(snake_case__)
assert num_inversions_bf == num_inversions_recursive == 0
print("""number of inversions = """ , snake_case__)
# an empty list should also have zero inversions
_A = []
_A = count_inversions_bf(snake_case__)
_A , _A = count_inversions_recursive(snake_case__)
assert num_inversions_bf == num_inversions_recursive == 0
print("""number of inversions = """ , snake_case__)
if __name__ == "__main__":
main()
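# Extra cross-check of the two implementations on a tiny input, following the
# function names used in main() above: [3, 1, 2] contains exactly two
# inversions, (3, 1) and (3, 2).
assert count_inversions_bf([3, 1, 2]) == 2
assert count_inversions_recursive([3, 1, 2])[1] == 2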
| 703 | from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class a ( __lowerCAmelCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[str]:
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def UpperCAmelCase ( self ) -> Optional[int]:
_A = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]}
return Dataset.from_dict(lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
_A = self._create_example_records()
_A = Dataset.from_list(lowerCAmelCase_ )
self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] )
for i, r in enumerate(lowerCAmelCase_ ):
self.assertDictEqual(lowerCAmelCase_ , example_records[i] )
def UpperCAmelCase ( self ) -> str:
_A = self._create_example_records()
_A = Dataset.from_list(lowerCAmelCase_ )
_A = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def UpperCAmelCase ( self ) -> Any: # checks what happens with missing columns
_A = [{"""col_1""": 1}, {"""col_2""": """x"""}]
_A = Dataset.from_list(lowerCAmelCase_ )
self.assertDictEqual(dset[0] , {"""col_1""": 1} )
self.assertDictEqual(dset[1] , {"""col_1""": None} ) # NB: first record is used for columns
def UpperCAmelCase ( self ) -> Tuple: # checks if the type can be inferred from the second record
_A = [{"""col_1""": []}, {"""col_1""": [1, 2]}]
_A = Dataset.from_list(lowerCAmelCase_ )
self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) )
def UpperCAmelCase ( self ) -> Any:
_A = Dataset.from_list([] )
self.assertEqual(len(lowerCAmelCase_ ) , 0 )
self.assertListEqual(dset.column_names , [] )
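# Minimal standalone usage of the API exercised above: build a Dataset from a
# list of row dicts (the first record fixes the column set) and read it back.
_ds = Dataset.from_list([{"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}])
assert _ds.column_names == ["col_1", "col_2"]
assert _ds["col_1"] == [3, 2]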
| 83 | 0 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class a ( nn.Module ):
"""simple docstring"""
lowerCamelCase :int
lowerCamelCase :int
lowerCamelCase :float = 0.0
lowerCamelCase :int = 1
lowerCamelCase :int = 1
lowerCamelCase :bool = True
lowerCamelCase :bool = False
lowerCamelCase :bool = False
lowerCamelCase :bool = False
lowerCamelCase :jnp.dtype = jnp.floataa
def UpperCAmelCase ( self ) -> Union[str, Any]:
_A = []
_A = []
for i in range(self.num_layers ):
_A = self.in_channels if i == 0 else self.out_channels
_A = FlaxResnetBlockaD(
in_channels=lowerCAmelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCAmelCase_ )
_A = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(lowerCAmelCase_ )
_A = resnets
_A = attentions
if self.add_downsample:
_A = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=True ) -> List[Any]:
_A = ()
for resnet, attn in zip(self.resnets , self.attentions ):
_A = resnet(lowerCAmelCase_ , lowerCAmelCase_ , deterministic=lowerCAmelCase_ )
_A = attn(lowerCAmelCase_ , lowerCAmelCase_ , deterministic=lowerCAmelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
_A = self.downsamplers_a(lowerCAmelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class a ( nn.Module ):
"""simple docstring"""
lowerCamelCase :int
lowerCamelCase :int
lowerCamelCase :float = 0.0
lowerCamelCase :int = 1
lowerCamelCase :bool = True
lowerCamelCase :jnp.dtype = jnp.floataa
def UpperCAmelCase ( self ) -> Optional[int]:
_A = []
for i in range(self.num_layers ):
_A = self.in_channels if i == 0 else self.out_channels
_A = FlaxResnetBlockaD(
in_channels=lowerCAmelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCAmelCase_ )
_A = resnets
if self.add_downsample:
_A = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=True ) -> Union[str, Any]:
_A = ()
for resnet in self.resnets:
_A = resnet(lowerCAmelCase_ , lowerCAmelCase_ , deterministic=lowerCAmelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
_A = self.downsamplers_a(lowerCAmelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class a ( nn.Module ):
"""simple docstring"""
lowerCamelCase :int
lowerCamelCase :int
lowerCamelCase :int
lowerCamelCase :float = 0.0
lowerCamelCase :int = 1
lowerCamelCase :int = 1
lowerCamelCase :bool = True
lowerCamelCase :bool = False
lowerCamelCase :bool = False
lowerCamelCase :bool = False
lowerCamelCase :jnp.dtype = jnp.floataa
def UpperCAmelCase ( self ) -> Optional[int]:
_A = []
_A = []
for i in range(self.num_layers ):
_A = self.in_channels if (i == self.num_layers - 1) else self.out_channels
_A = self.prev_output_channel if i == 0 else self.out_channels
_A = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCAmelCase_ )
_A = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(lowerCAmelCase_ )
_A = resnets
_A = attentions
if self.add_upsample:
_A = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=True ) -> Optional[int]:
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
_A = res_hidden_states_tuple[-1]
_A = res_hidden_states_tuple[:-1]
_A = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
_A = resnet(lowerCAmelCase_ , lowerCAmelCase_ , deterministic=lowerCAmelCase_ )
_A = attn(lowerCAmelCase_ , lowerCAmelCase_ , deterministic=lowerCAmelCase_ )
if self.add_upsample:
_A = self.upsamplers_a(lowerCAmelCase_ )
return hidden_states
class a ( nn.Module ):
"""simple docstring"""
lowerCamelCase :int
lowerCamelCase :int
lowerCamelCase :int
lowerCamelCase :float = 0.0
lowerCamelCase :int = 1
lowerCamelCase :bool = True
lowerCamelCase :jnp.dtype = jnp.floataa
def UpperCAmelCase ( self ) -> Tuple:
_A = []
for i in range(self.num_layers ):
_A = self.in_channels if (i == self.num_layers - 1) else self.out_channels
_A = self.prev_output_channel if i == 0 else self.out_channels
_A = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCAmelCase_ )
_A = resnets
if self.add_upsample:
_A = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=True ) -> Union[str, Any]:
for resnet in self.resnets:
# pop res hidden states
_A = res_hidden_states_tuple[-1]
_A = res_hidden_states_tuple[:-1]
_A = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
_A = resnet(lowerCAmelCase_ , lowerCAmelCase_ , deterministic=lowerCAmelCase_ )
if self.add_upsample:
_A = self.upsamplers_a(lowerCAmelCase_ )
return hidden_states
class a ( nn.Module ):
"""simple docstring"""
lowerCamelCase :int
lowerCamelCase :float = 0.0
lowerCamelCase :int = 1
lowerCamelCase :int = 1
lowerCamelCase :bool = False
lowerCamelCase :bool = False
lowerCamelCase :jnp.dtype = jnp.floataa
def UpperCAmelCase ( self ) -> Optional[Any]:
# there is always at least one resnet
_A = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
_A = []
for _ in range(self.num_layers ):
_A = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(lowerCAmelCase_ )
_A = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCAmelCase_ )
_A = resnets
_A = attentions
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=True ) -> List[str]:
_A = self.resnets[0](lowerCAmelCase_ , lowerCAmelCase_ )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
_A = attn(lowerCAmelCase_ , lowerCAmelCase_ , deterministic=lowerCAmelCase_ )
_A = resnet(lowerCAmelCase_ , lowerCAmelCase_ , deterministic=lowerCAmelCase_ )
return hidden_states
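# Shape sketch for the skip connections consumed by the up blocks above: each
# resnet sees the running hidden states concatenated with the matching
# residual popped from the down path, along the channel axis (the last axis in
# Flax's NHWC layout). Shapes below are illustrative.
_hidden_states = jnp.zeros((1, 8, 8, 320))  # (batch, height, width, channels)
_res_hidden_states = jnp.zeros((1, 8, 8, 320))
_merged = jnp.concatenate((_hidden_states, _res_hidden_states), axis=-1)
assert _merged.shape == (1, 8, 8, 640)  # channels double before the resnet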
| 704 | def snake_case ( snake_case__ :int = 1_000_000) -> int:
_A = set(range(3 , snake_case__ , 2))
primes.add(2)
for p in range(3 , snake_case__ , 2):
if p not in primes:
continue
primes.difference_update(set(range(p * p , snake_case__ , snake_case__)))
_A = [float(snake_case__) for n in range(limit + 1)]
for p in primes:
for n in range(snake_case__ , limit + 1 , snake_case__):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:]))
if __name__ == "__main__":
print(F'''{solution() = }''')
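# Sanity check of the sieve-style totient update above against the brute-force
# definition: phi(n) counts 1 <= k <= n with gcd(n, k) == 1, and
# sum(phi(n) for n in 2..10) = 1+2+2+4+2+6+4+6+4 = 31.
from math import gcd


def _phi_naive(n):
    return sum(1 for k in range(1, n + 1) if gcd(n, k) == 1)


assert sum(_phi_naive(n) for n in range(2, 11)) == 31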
| 83 | 0 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class a :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=2 , lowerCAmelCase_=3 , lowerCAmelCase_=4 , lowerCAmelCase_=2 , lowerCAmelCase_=7 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=99 , lowerCAmelCase_=36 , lowerCAmelCase_=2 , lowerCAmelCase_=4 , lowerCAmelCase_=37 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=5_12 , lowerCAmelCase_=16 , lowerCAmelCase_=2 , lowerCAmelCase_=0.02 , lowerCAmelCase_=6 , lowerCAmelCase_=6 , lowerCAmelCase_=3 , lowerCAmelCase_=4 , lowerCAmelCase_=None , lowerCAmelCase_=10_00 , ) -> Any:
_A = parent
_A = batch_size
_A = num_channels
_A = image_size
_A = patch_size
_A = is_training
_A = use_input_mask
_A = use_token_type_ids
_A = use_labels
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = type_vocab_size
_A = type_sequence_label_size
_A = initializer_range
_A = coordinate_size
_A = shape_size
_A = num_labels
_A = num_choices
_A = scope
_A = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
_A = text_seq_length
_A = (image_size // patch_size) ** 2 + 1
_A = self.text_seq_length + self.image_seq_length
def UpperCAmelCase ( self ) -> Dict:
_A = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
_A = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
_A = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_A = bbox[i, j, 3]
_A = bbox[i, j, 1]
_A = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
_A = bbox[i, j, 2]
_A = bbox[i, j, 0]
_A = tmp_coordinate
_A = tf.constant(lowerCAmelCase_ )
_A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A = None
if self.use_input_mask:
_A = random_attention_mask([self.batch_size, self.text_seq_length] )
_A = None
if self.use_token_type_ids:
_A = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
_A = None
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
_A = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any:
_A = TFLayoutLMvaModel(config=lowerCAmelCase_ )
# text + image
_A = model(lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , training=lowerCAmelCase_ )
_A = model(
lowerCAmelCase_ , bbox=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , training=lowerCAmelCase_ , )
_A = model(lowerCAmelCase_ , bbox=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , training=lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
_A = model(lowerCAmelCase_ , training=lowerCAmelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
_A = model({"""pixel_values""": pixel_values} , training=lowerCAmelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
_A = self.num_labels
_A = TFLayoutLMvaForSequenceClassification(config=lowerCAmelCase_ )
_A = model(
lowerCAmelCase_ , bbox=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , training=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]:
_A = self.num_labels
_A = TFLayoutLMvaForTokenClassification(config=lowerCAmelCase_ )
_A = model(
lowerCAmelCase_ , bbox=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , training=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> int:
_A = 2
_A = TFLayoutLMvaForQuestionAnswering(config=lowerCAmelCase_ )
_A = model(
lowerCAmelCase_ , bbox=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , training=lowerCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self ) -> Optional[Any]:
_A = self.prepare_config_and_inputs()
((_A) , (_A) , (_A) , (_A) , (_A) , (_A) , (_A) , (_A)) = config_and_inputs
_A = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""pixel_values""": pixel_values,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
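# The test mixin below plugs the tester into the shared TF test suite; _prepare_for_class adds task-appropriate dummy labels.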
@require_tf
class a ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase :str = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCamelCase :Any = (
{'''document-question-answering''': TFLayoutLMvaForQuestionAnswering, '''feature-extraction''': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
lowerCamelCase :Optional[Any] = False
lowerCamelCase :List[Any] = False
lowerCamelCase :int = False
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any:
return True
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False ) -> dict:
_A = copy.deepcopy(lowerCAmelCase_ )
if model_class in get_values(lowerCAmelCase_ ):
_A = {
k: tf.tile(tf.expand_dims(lowerCAmelCase_ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(lowerCAmelCase_ , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(lowerCAmelCase_ ):
_A = tf.ones(self.model_tester.batch_size , dtype=tf.int32 )
elif model_class in get_values(lowerCAmelCase_ ):
_A = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
_A = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
elif model_class in get_values(lowerCAmelCase_ ):
_A = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
elif model_class in get_values(lowerCAmelCase_ ):
_A = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.int32 )
return inputs_dict
def UpperCAmelCase ( self ) -> List[str]:
_A = TFLayoutLMvaModelTester(self )
_A = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 )
def UpperCAmelCase ( self ) -> List[Any]:
self.config_tester.run_common_tests()
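# The test below feeds labels four ways (kwargs, masked label tensors, a dict, and a positional tuple) and checks the loss shape each time.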
def UpperCAmelCase ( self ) -> Tuple:
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(lowerCAmelCase_ )
if getattr(lowerCAmelCase_ , """hf_compute_loss""" , lowerCAmelCase_ ):
# The number of elements in the loss should be the same as the number of elements in the label
_A = self._prepare_for_class(inputs_dict.copy() , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
_A = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=lowerCAmelCase_ )[0]
]
_A = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
_A = self._prepare_for_class(inputs_dict.copy() , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
_A = prepared_for_class.pop("""input_ids""" )
_A = model(lowerCAmelCase_ , **lowerCAmelCase_ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
_A = self._prepare_for_class(inputs_dict.copy() , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
_A = prepared_for_class.pop("""input_ids""" )
if "labels" in prepared_for_class:
_A = prepared_for_class["""labels"""].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
_A = -1_00
_A = tf.convert_to_tensor(lowerCAmelCase_ )
_A = model(lowerCAmelCase_ , **lowerCAmelCase_ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
_A = self._prepare_for_class(inputs_dict.copy() , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
_A = model(lowerCAmelCase_ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
_A = self._prepare_for_class(inputs_dict.copy() , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
# Get keys that were added with the _prepare_for_class function
_A = prepared_for_class.keys() - inputs_dict.keys()
_A = inspect.signature(model.call ).parameters
_A = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
_A = {0: """input_ids"""}
for label_key in label_keys:
_A = signature_names.index(lowerCAmelCase_ )
_A = label_key
_A = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
_A = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
_A = prepared_for_class[value]
_A = tuple(lowerCAmelCase_ )
# Send to model
_A = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def UpperCAmelCase ( self ) -> Dict:
((_A) , (_A) , (_A) , (_A) , (_A) , (_A) , (_A) , (_A)) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Optional[Any]:
((_A) , (_A) , (_A) , (_A) , (_A) , (_A) , (_A) , (_A)) = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_A = type
self.model_tester.create_and_check_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Any:
((_A) , (_A) , (_A) , (_A) , (_A) , (_A) , (_A) , (_A)) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
((_A) , (_A) , (_A) , (_A) , (_A) , (_A) , (_A) , (_A)) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> str:
((_A) , (_A) , (_A) , (_A) , (_A) , (_A) , (_A) , (_A)) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def UpperCAmelCase ( self ) -> List[Any]:
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = TFLayoutLMvaModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def prepare_img ( ) -> "Image.Image":
_A = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
return _A
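# Integration test: run the pretrained checkpoint on a fixture image and compare a 3x3 slice of the final hidden states against reference values.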
@require_tf
class a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase ( self ) -> List[Any]:
return LayoutLMvaImageProcessor(apply_ocr=lowerCAmelCase_ ) if is_vision_available() else None
@slow
def UpperCAmelCase ( self ) -> Tuple:
_A = TFLayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" )
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(images=lowerCAmelCase_ , return_tensors="""tf""" ).pixel_values
_A = tf.constant([[1, 2]] )
_A = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
_A = model(input_ids=lowerCAmelCase_ , bbox=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , training=lowerCAmelCase_ )
# verify the logits
_A = (1, 1_99, 7_68)
self.assertEqual(outputs.last_hidden_state.shape , lowerCAmelCase_ )
_A = tf.constant(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCAmelCase_ , atol=1E-4 ) )
| 705 | import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class a ( __lowerCAmelCase ):
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=7 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=99 , lowerCAmelCase_=32 , lowerCAmelCase_=5 , lowerCAmelCase_=4 , lowerCAmelCase_=37 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=5_12 , lowerCAmelCase_=16 , lowerCAmelCase_=2 , lowerCAmelCase_=0.02 , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_="None" , lowerCAmelCase_=3 , lowerCAmelCase_=4 , lowerCAmelCase_=None , ) -> Union[str, Any]:
_A = parent
_A = batch_size
_A = seq_length
_A = is_training
_A = use_input_mask
_A = use_token_type_ids
_A = use_labels
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = type_vocab_size
_A = type_sequence_label_size
_A = initializer_range
_A = num_labels
_A = num_choices
_A = relative_attention
_A = position_biased_input
_A = pos_att_type
_A = scope
def UpperCAmelCase ( self ) -> Dict:
_A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A = None
if self.use_input_mask:
_A = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_A = None
if self.use_token_type_ids:
_A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_A = None
_A = None
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_A = ids_tensor([self.batch_size] , self.num_choices )
_A = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase ( self ) -> Optional[int]:
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Any:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]:
_A = DebertaVaModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0]
_A = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0]
_A = model(lowerCAmelCase_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
_A = DebertaVaForMaskedLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any:
_A = self.num_labels
_A = DebertaVaForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
_A = self.num_labels
_A = DebertaVaForTokenClassification(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[int]:
_A = DebertaVaForQuestionAnswering(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
_A = DebertaVaForMultipleChoice(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase ( self ) -> Optional[int]:
_A = self.prepare_config_and_inputs()
((_A) , (_A) , (_A) , (_A) , (_A) , (_A) , (_A)) = config_and_inputs
_A = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
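# The mixin class below runs the shared PyTorch model tests against each task head.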
@require_torch
class a ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase :int = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
lowerCamelCase :str = (
{
'''feature-extraction''': DebertaVaModel,
'''fill-mask''': DebertaVaForMaskedLM,
'''question-answering''': DebertaVaForQuestionAnswering,
'''text-classification''': DebertaVaForSequenceClassification,
'''token-classification''': DebertaVaForTokenClassification,
'''zero-shot''': DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase :str = True
lowerCamelCase :Union[str, Any] = False
lowerCamelCase :Optional[int] = False
lowerCamelCase :List[str] = False
lowerCamelCase :str = False
def UpperCAmelCase ( self ) -> Optional[int]:
_A = DebertaVaModelTester(self )
_A = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 )
def UpperCAmelCase ( self ) -> List[str]:
self.config_tester.run_common_tests()
def UpperCAmelCase ( self ) -> List[str]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Any:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> int:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Optional[int]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*lowerCAmelCase_ )
@slow
def UpperCAmelCase ( self ) -> Any:
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = DebertaVaModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class a ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip(reason="""Model not available yet""" )
def UpperCAmelCase ( self ) -> int:
pass
@slow
def UpperCAmelCase ( self ) -> Optional[Any]:
_A = DebertaVaModel.from_pretrained("""microsoft/deberta-v2-xlarge""" )
_A = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
_A = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0]
# compare the actual values for a slice.
_A = torch.tensor(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase_ , atol=1E-4 ) , F'''{output[:, 1:4, 1:4]}''' )
| 83 | 0 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
_SCREAMING_SNAKE_CASE = '<<<<<<< This should probably be modified because it mentions: '
_SCREAMING_SNAKE_CASE = '=======\n>>>>>>>\n'
_SCREAMING_SNAKE_CASE = [
'TextEncoderConfig',
'ByteTextEncoder',
'SubwordTextEncoder',
'encoder_config',
'maybe_build_from_corpus',
'manual_dir',
]
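# Lines mentioning any expression above cannot be converted automatically; they are wrapped in conflict-style markers for manual review.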
_SCREAMING_SNAKE_CASE = [
# (pattern, replacement)
# Order is important here for some replacements
(R'tfds\.core', R'datasets'),
(R'tf\.io\.gfile\.GFile', R'open'),
(R'tf\.([\w\d]+)', R'datasets.Value(\'\1\')'),
(R'tfds\.features\.Text\(\)', R'datasets.Value(\'string\')'),
(R'tfds\.features\.Text\(', R'datasets.Value(\'string\'),'),
(R'features\s*=\s*tfds.features.FeaturesDict\(', R'features=datasets.Features('),
(R'tfds\.features\.FeaturesDict\(', R'dict('),
(R'The TensorFlow Datasets Authors', R'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'),
(R'tfds\.', R'datasets.'),
(R'dl_manager\.manual_dir', R'self.config.data_dir'),
(R'self\.builder_config', R'self.config'),
]
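# For example (applying the substitutions above in order), the line
#   features=tfds.features.FeaturesDict({"text": tfds.features.Text()})
# becomes
#   features=datasets.Features({"text": datasets.Value('string')})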
def snake_case ( snake_case__ :Namespace) -> int:
return ConvertCommand(snake_case__.tfds_path , snake_case__.datasets_directory)
class a ( __lowerCAmelCase ):
"""simple docstring"""
@staticmethod
def UpperCAmelCase ( lowerCAmelCase_ ) -> Any:
_A = parser.add_parser(
"""convert""" , help="""Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.""" , )
train_parser.add_argument(
"""--tfds_path""" , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help="""Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.""" , )
train_parser.add_argument(
"""--datasets_directory""" , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help="""Path to the HuggingFace Datasets folder.""" )
train_parser.set_defaults(func=lowerCAmelCase_ )
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , *lowerCAmelCase_ ) -> Any:
_A = get_logger("""datasets-cli/converting""" )
_A = tfds_path
_A = datasets_directory
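# run() resolves the input and output paths, rewrites each candidate .py file line by line, then relocates shared utility files.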
def UpperCAmelCase ( self ) -> List[str]:
if os.path.isdir(self._tfds_path ):
_A = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
_A = os.path.dirname(self._tfds_path )
else:
raise ValueError("""--tfds_path is neither a directory nor a file. Please check path.""" )
_A = os.path.abspath(self._datasets_directory )
self._logger.info(F'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' )
_A = []
_A = []
_A = {}
if os.path.isdir(self._tfds_path ):
_A = os.listdir(lowerCAmelCase_ )
else:
_A = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(F'''Looking at file {f_name}''' )
_A = os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )
_A = os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )
if not os.path.isfile(lowerCAmelCase_ ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("""Skipping file""" )
continue
with open(lowerCAmelCase_ , encoding="""utf-8""" ) as f:
_A = f.readlines()
_A = []
_A = False
_A = False
_A = []
for line in lines:
_A = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
_A = """import datasets\n"""
elif "import tensorflow" in out_line:
# order is important here
_A = """"""
continue
elif "from absl import logging" in out_line:
_A = """from datasets import logging\n"""
elif "getLogger" in out_line:
_A = out_line.replace("""getLogger""" , """get_logger""" )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
_A = True
_A = list(filter(lambda e : e in out_line , TO_HIGHLIGHT ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowerCAmelCase_ ) + """\n""" )
out_lines.append(lowerCAmelCase_ )
out_lines.append(lowerCAmelCase_ )
continue
else:
for pattern, replacement in TO_CONVERT:
_A = re.sub(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Take care of saving utilities (to later move them together with the main script)
if "tensorflow_datasets" in out_line:
_A = re.match(r"""from\stensorflow_datasets.*import\s([^\.\r\n]+)""" , lowerCAmelCase_ )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(""",""" ) )
_A = """from . import """ + match.group(1 )
# Check we have not forgotten anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(F'''Error converting {out_line.strip()}''' )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
_A = True
out_lines.append(lowerCAmelCase_ )
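# Builder scripts are moved into their own dataset directory; other files are collected as utilities and copied next to the builders that import them.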
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
_A = f_name.replace(""".py""" , """""" )
_A = os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )
_A = os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )
os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ )
self._logger.info(F'''Adding directory {output_dir}''' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(lowerCAmelCase_ )
if needs_manual_update:
with_manual_update.append(lowerCAmelCase_ )
with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as f:
f.writelines(lowerCAmelCase_ )
self._logger.info(F'''Converted in {output_file}''' )
for utils_file in utils_files:
try:
_A = os.path.basename(lowerCAmelCase_ )
_A = imports_to_builder_map[f_name.replace(""".py""" , """""" )]
self._logger.info(F'''Moving {dest_folder} to {utils_file}''' )
shutil.copy(lowerCAmelCase_ , lowerCAmelCase_ )
except KeyError:
self._logger.error(F'''Cannot find destination folder for {utils_file}. Please copy manually.''' )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
| 706 | def nor_gate ( input_a :int , input_b :int) -> int:
return int(input_a == input_b == 0)
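# NOR truth table: nor_gate(0, 0) == 1; every other input pair yields 0.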
def main ( ) -> None:
print("""Truth Table of NOR Gate:""")
print("""| Input 1 | Input 2 | Output |""")
print(F'''| 0 | 0 | {nor_gate(0 , 0)} |''')
print(F'''| 0 | 1 | {nor_gate(0 , 1)} |''')
print(F'''| 1 | 0 | {nor_gate(1 , 0)} |''')
print(F'''| 1 | 1 | {nor_gate(1 , 1)} |''')
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 83 | 0 |